2014-08-12 08:10:52 -06:00
|
|
|
# -*- coding: utf-8 -*-
|
2016-01-06 21:26:29 -07:00
|
|
|
# Copyright 2014-2016 OpenMarket Ltd
|
2018-02-06 09:40:38 -07:00
|
|
|
# Copyright 2018 New Vector Ltd
|
2014-08-12 08:10:52 -06:00
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
2014-08-12 20:14:34 -06:00
|
|
|
|
2014-08-12 08:10:52 -06:00
|
|
|
"""Contains handlers for federation events."""
|
2018-04-17 15:11:19 -06:00
|
|
|
|
|
|
|
import itertools
|
|
|
|
import logging
|
2018-04-27 04:40:06 -06:00
|
|
|
import sys
|
2018-04-17 15:11:19 -06:00
|
|
|
|
2018-04-27 04:40:06 -06:00
|
|
|
import six
|
2018-07-20 23:47:18 -06:00
|
|
|
from six import iteritems, itervalues
|
|
|
|
from six.moves import http_client, zip
|
2018-07-09 00:09:20 -06:00
|
|
|
|
|
|
|
from signedjson.key import decode_verify_key_bytes
|
|
|
|
from signedjson.sign import verify_signed_json
|
2016-02-23 08:11:25 -07:00
|
|
|
from unpaddedbase64 import decode_base64
|
2014-08-12 08:10:52 -06:00
|
|
|
|
2018-07-09 00:09:20 -06:00
|
|
|
from twisted.internet import defer
|
2014-08-12 08:10:52 -06:00
|
|
|
|
2018-07-09 00:09:20 -06:00
|
|
|
from synapse.api.constants import EventTypes, Membership, RejectedReason
|
2014-11-26 09:06:20 -07:00
|
|
|
from synapse.api.errors import (
|
2018-07-09 00:09:20 -06:00
|
|
|
AuthError,
|
|
|
|
CodeMessageException,
|
2018-01-22 11:11:18 -07:00
|
|
|
FederationDeniedError,
|
2018-07-09 00:09:20 -06:00
|
|
|
FederationError,
|
|
|
|
StoreError,
|
|
|
|
SynapseError,
|
2014-11-26 09:06:20 -07:00
|
|
|
)
|
2014-11-14 09:45:39 -07:00
|
|
|
from synapse.crypto.event_signing import (
|
2018-07-09 00:09:20 -06:00
|
|
|
add_hashes_and_signatures,
|
|
|
|
compute_event_signature,
|
2014-11-14 09:45:39 -07:00
|
|
|
)
|
2018-07-09 00:09:20 -06:00
|
|
|
from synapse.events.validator import EventValidator
|
2018-06-27 04:27:32 -06:00
|
|
|
from synapse.state import resolve_events_with_factory
|
2016-05-16 12:17:03 -06:00
|
|
|
from synapse.types import UserID, get_domain_from_id
|
2018-07-09 00:09:20 -06:00
|
|
|
from synapse.util import logcontext, unwrapFirstError
|
|
|
|
from synapse.util.async import Linearizer
|
|
|
|
from synapse.util.distributor import user_joined_room
|
|
|
|
from synapse.util.frozenutils import unfreeze
|
|
|
|
from synapse.util.logutils import log_function
|
2015-05-12 03:35:45 -06:00
|
|
|
from synapse.util.retryutils import NotRetryingDestination
|
2018-07-16 04:38:45 -06:00
|
|
|
from synapse.visibility import filter_events_for_server
|
2015-05-12 03:35:45 -06:00
|
|
|
|
2018-07-09 00:09:20 -06:00
|
|
|
from ._base import BaseHandler
|
2015-12-10 09:26:08 -07:00
|
|
|
|
2014-08-12 08:10:52 -06:00
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
|
|
|
|
|
|
class FederationHandler(BaseHandler):
|
2014-08-26 12:49:42 -06:00
|
|
|
"""Handles events that originated from federation.
|
|
|
|
Responsible for:
|
|
|
|
a) handling received Pdus before handing them on as Events to the rest
|
|
|
|
of the home server (including auth and state conflict resolution)
|
|
|
|
b) converting events that were produced by local clients that may need
|
|
|
|
to be sent to remote home servers.
|
2014-11-12 09:20:21 -07:00
|
|
|
c) doing the necessary dances to invite remote users and join remote
|
|
|
|
rooms.
|
2014-08-26 12:49:42 -06:00
|
|
|
"""
|
2014-08-12 08:10:52 -06:00
|
|
|
|
2014-08-21 07:38:22 -06:00
|
|
|
    def __init__(self, hs):
        """
        Args:
            hs (synapse.server.HomeServer): the homeserver; all collaborators
                below are fetched from it.
        """
        super(FederationHandler, self).__init__(hs)

        self.hs = hs

        # Storage layer and federation transport/client.
        self.store = hs.get_datastore()
        self.federation_client = hs.get_federation_client()
        self.state_handler = hs.get_state_handler()
        # Our own server name, used e.g. to refuse backfilling from ourselves
        # and for check_host_in_room calls.
        self.server_name = hs.hostname
        self.keyring = hs.get_keyring()
        self.action_generator = hs.get_action_generator()
        self.is_mine_id = hs.is_mine_id
        self.pusher_pool = hs.get_pusherpool()
        self.spam_checker = hs.get_spam_checker()
        self.event_creation_handler = hs.get_event_creation_handler()
        # MXID of the server-notices user, if configured (may be None).
        self._server_notices_mxid = hs.config.server_notices_mxid

        # When joining a room we need to queue any events for that room up.
        # Maps room_id -> list of (pdu, origin) tuples; drained once the join
        # completes.
        self.room_queues = {}
        # Serialises fetching of missing events per room, so that we only ever
        # have one /get_missing_events request in flight per room.
        self._room_pdu_linearizer = Linearizer("fed_room_pdu")
|
|
|
|
|
|
|
|
    @defer.inlineCallbacks
    @log_function
    def on_receive_pdu(
        self, origin, pdu, get_missing=True, sent_to_us_directly=False,
    ):
        """ Process a PDU received via a federation /send/ transaction, or
        via backfill of missing prev_events

        Args:
            origin (str): server which initiated the /send/ transaction. Will
                be used to fetch missing events or state.
            pdu (FrozenEvent): received PDU
            get_missing (bool): True if we should fetch missing prev_events
            sent_to_us_directly (bool): True if this PDU arrived on a /send/
                transaction (as opposed to being pulled in via backfill /
                get_missing_events). If so, and we still have missing
                prev_events after trying to fetch them, we reject the event
                rather than trying to calculate the state ourselves.

        Returns (Deferred): completes with None
        """

        # We reprocess pdus when we have seen them only as outliers
        existing = yield self.store.get_event(
            pdu.event_id,
            allow_none=True,
            allow_rejected=True,
        )

        # FIXME: Currently we fetch an event again when we already have it
        # if it has been marked as an outlier.

        # "Already seen" means: we have a non-outlier copy, or the incoming
        # copy is itself an outlier (so it can't tell us anything new).
        already_seen = (
            existing and (
                not existing.internal_metadata.is_outlier()
                or pdu.internal_metadata.is_outlier()
            )
        )
        if already_seen:
            logger.debug("Already seen pdu %s", pdu.event_id)
            return

        # do some initial sanity-checking of the event. In particular, make
        # sure it doesn't have hundreds of prev_events or auth_events, which
        # could cause a huge state resolution or cascade of event fetches.
        try:
            self._sanity_check_event(pdu)
        except SynapseError as err:
            raise FederationError(
                "ERROR",
                err.code,
                err.msg,
                affected=pdu.event_id,
            )

        # If we are currently in the process of joining this room, then we
        # queue up events for later processing.
        if pdu.room_id in self.room_queues:
            logger.info("Ignoring PDU %s for room %s from %s for now; join "
                        "in progress", pdu.event_id, pdu.room_id, origin)
            self.room_queues[pdu.room_id].append((pdu, origin))
            return

        # If we're no longer in the room just ditch the event entirely. This
        # is probably an old server that has come back and thinks we're still
        # in the room (or we've been rejoined to the room by a state reset).
        #
        # If we were never in the room then maybe our database got vaped and
        # we should check if we *are* in fact in the room. If we are then we
        # can magically rejoin the room.
        is_in_room = yield self.auth.check_host_in_room(
            pdu.room_id,
            self.server_name
        )
        if not is_in_room:
            was_in_room = yield self.store.was_host_joined(
                pdu.room_id, self.server_name,
            )
            if was_in_room:
                logger.info(
                    "Ignoring PDU %s for room %s from %s as we've left the room!",
                    pdu.event_id, pdu.room_id, origin,
                )
                defer.returnValue(None)

        state = None
        auth_chain = []

        # Get missing pdus if necessary.
        if not pdu.internal_metadata.is_outlier():
            # We only backfill backwards to the min depth.
            min_depth = yield self.get_min_depth_for_context(
                pdu.room_id
            )

            logger.debug(
                "_handle_new_pdu min_depth for %s: %d",
                pdu.room_id, min_depth
            )

            prevs = {e_id for e_id, _ in pdu.prev_events}
            seen = yield self.store.have_seen_events(prevs)

            if min_depth and pdu.depth < min_depth:
                # This is so that we don't notify the user about this
                # message, to work around the fact that some events will
                # reference really really old events we really don't want to
                # send to the clients.
                pdu.internal_metadata.outlier = True
            elif min_depth and pdu.depth > min_depth:
                if get_missing and prevs - seen:
                    # If we're missing stuff, ensure we only fetch stuff one
                    # at a time.
                    logger.info(
                        "Acquiring lock for room %r to fetch %d missing events: %r...",
                        pdu.room_id, len(prevs - seen), list(prevs - seen)[:5],
                    )
                    with (yield self._room_pdu_linearizer.queue(pdu.room_id)):
                        logger.info(
                            "Acquired lock for room %r to fetch %d missing events",
                            pdu.room_id, len(prevs - seen),
                        )

                        yield self._get_missing_events_for_pdu(
                            origin, pdu, prevs, min_depth
                        )

                        # Update the set of things we've seen after trying to
                        # fetch the missing stuff
                        seen = yield self.store.have_seen_events(prevs)

                        if not prevs - seen:
                            logger.info(
                                "Found all missing prev events for %s", pdu.event_id
                            )
                elif prevs - seen:
                    logger.info(
                        "Not fetching %d missing events for room %r,event %s: %r...",
                        len(prevs - seen), pdu.room_id, pdu.event_id,
                        list(prevs - seen)[:5],
                    )

            if sent_to_us_directly and prevs - seen:
                # If they have sent it to us directly, and the server
                # isn't telling us about the auth events that it's
                # made a message referencing, we explode
                raise FederationError(
                    "ERROR",
                    403,
                    (
                        "Your server isn't divulging details about prev_events "
                        "referenced in this event."
                    ),
                    affected=pdu.event_id,
                )
            elif prevs - seen:
                # Calculate the state of the previous events, and
                # de-conflict them to find the current state.
                state_groups = []
                auth_chains = set()
                try:
                    # Get the state of the events we know about
                    ours = yield self.store.get_state_groups(pdu.room_id, list(seen))
                    state_groups.append(ours)

                    # Ask the remote server for the states we don't
                    # know about
                    for p in prevs - seen:
                        state, got_auth_chain = (
                            yield self.federation_client.get_state_for_room(
                                origin, pdu.room_id, p
                            )
                        )
                        auth_chains.update(got_auth_chain)
                        state_group = {(x.type, x.state_key): x.event_id for x in state}
                        state_groups.append(state_group)

                    # Resolve any conflicting state
                    def fetch(ev_ids):
                        return self.store.get_events(
                            ev_ids, get_prev_content=False, check_redacted=False
                        )

                    state_map = yield resolve_events_with_factory(
                        state_groups, {pdu.event_id: pdu}, fetch
                    )

                    state = (yield self.store.get_events(state_map.values())).values()
                    auth_chain = list(auth_chains)
                except Exception:
                    # NOTE(review): this swallows the underlying failure and
                    # reports a generic 403; consider logging the exception
                    # before re-raising so the root cause is diagnosable.
                    raise FederationError(
                        "ERROR",
                        403,
                        "We can't get valid state history.",
                        affected=pdu.event_id,
                    )

        yield self._process_received_pdu(
            origin,
            pdu,
            state=state,
            auth_chain=auth_chain,
        )
|
|
|
|
|
|
|
|
@defer.inlineCallbacks
|
|
|
|
def _get_missing_events_for_pdu(self, origin, pdu, prevs, min_depth):
|
|
|
|
"""
|
|
|
|
Args:
|
|
|
|
origin (str): Origin of the pdu. Will be called to get the missing events
|
|
|
|
pdu: received pdu
|
2017-04-28 05:46:53 -06:00
|
|
|
prevs (set(str)): List of event ids which we are missing
|
2017-03-09 09:20:13 -07:00
|
|
|
min_depth (int): Minimum depth of events to return.
|
|
|
|
"""
|
|
|
|
# We recalculate seen, since it may have changed.
|
2018-04-17 11:30:53 -06:00
|
|
|
seen = yield self.store.have_seen_events(prevs)
|
2017-03-09 09:20:13 -07:00
|
|
|
|
|
|
|
if not prevs - seen:
|
2017-04-28 05:46:53 -06:00
|
|
|
return
|
2017-03-09 09:20:13 -07:00
|
|
|
|
|
|
|
latest = yield self.store.get_latest_event_ids_in_room(
|
|
|
|
pdu.room_id
|
|
|
|
)
|
|
|
|
|
|
|
|
# We add the prev events that we have seen to the latest
|
|
|
|
# list to ensure the remote server doesn't give them to us
|
|
|
|
latest = set(latest)
|
|
|
|
latest |= seen
|
|
|
|
|
|
|
|
logger.info(
|
2017-04-27 08:16:21 -06:00
|
|
|
"Missing %d events for room %r pdu %s: %r...",
|
2017-04-27 07:38:21 -06:00
|
|
|
len(prevs - seen), pdu.room_id, pdu.event_id, list(prevs - seen)[:5]
|
2017-03-09 09:20:13 -07:00
|
|
|
)
|
|
|
|
|
|
|
|
# XXX: we set timeout to 10s to help workaround
|
|
|
|
# https://github.com/matrix-org/synapse/issues/1733.
|
|
|
|
# The reason is to avoid holding the linearizer lock
|
|
|
|
# whilst processing inbound /send transactions, causing
|
|
|
|
# FDs to stack up and block other inbound transactions
|
|
|
|
# which empirically can currently take up to 30 minutes.
|
|
|
|
#
|
|
|
|
# N.B. this explicitly disables retry attempts.
|
|
|
|
#
|
|
|
|
# N.B. this also increases our chances of falling back to
|
|
|
|
# fetching fresh state for the room if the missing event
|
|
|
|
# can't be found, which slightly reduces our security.
|
|
|
|
# it may also increase our DAG extremity count for the room,
|
|
|
|
# causing additional state resolution? See #1760.
|
|
|
|
# However, fetching state doesn't hold the linearizer lock
|
|
|
|
# apparently.
|
|
|
|
#
|
|
|
|
# see https://github.com/matrix-org/synapse/pull/1744
|
|
|
|
|
2018-07-31 08:44:05 -06:00
|
|
|
missing_events = yield self.federation_client.get_missing_events(
|
2017-03-09 09:20:13 -07:00
|
|
|
origin,
|
|
|
|
pdu.room_id,
|
|
|
|
earliest_events_ids=list(latest),
|
|
|
|
latest_events=[pdu],
|
|
|
|
limit=10,
|
|
|
|
min_depth=min_depth,
|
|
|
|
timeout=10000,
|
|
|
|
)
|
|
|
|
|
2017-04-27 07:38:21 -06:00
|
|
|
logger.info(
|
|
|
|
"Got %d events: %r...",
|
|
|
|
len(missing_events), [e.event_id for e in missing_events[:5]]
|
|
|
|
)
|
|
|
|
|
2017-03-09 09:20:13 -07:00
|
|
|
# We want to sort these by depth so we process them and
|
|
|
|
# tell clients about them in order.
|
|
|
|
missing_events.sort(key=lambda x: x.depth)
|
|
|
|
|
|
|
|
for e in missing_events:
|
2017-04-27 07:38:21 -06:00
|
|
|
logger.info("Handling found event %s", e.event_id)
|
2018-06-27 04:27:32 -06:00
|
|
|
try:
|
|
|
|
yield self.on_receive_pdu(
|
|
|
|
origin,
|
|
|
|
e,
|
|
|
|
get_missing=False
|
|
|
|
)
|
|
|
|
except FederationError as e:
|
|
|
|
if e.code == 403:
|
|
|
|
logger.warn("Event %s failed history check.")
|
|
|
|
else:
|
|
|
|
raise
|
2017-03-09 09:20:13 -07:00
|
|
|
|
2014-08-26 12:49:42 -06:00
|
|
|
    @log_function
    @defer.inlineCallbacks
    def _process_received_pdu(self, origin, pdu, state, auth_chain):
        """ Called when we have a new pdu. We need to do auth checks and put it
        through the StateHandler.

        Args:
            origin (str): server the pdu came from.
            pdu (FrozenEvent): the event to process.
            state (list[FrozenEvent]|None): state at the event, if supplied by
                the caller (e.g. resolved state for events with missing
                prev_events); None otherwise.
            auth_chain (list[FrozenEvent]): auth chain events supplied
                alongside `state` (may be empty).
        """
        event = pdu

        logger.debug("Processing event: %s", event)

        # FIXME (erikj): Awful hack to make the case where we are not currently
        # in the room work
        # If state and auth_chain are None, then we don't need to do this check
        # as we already know we have enough state in the DB to handle this
        # event.
        if state and auth_chain and not event.internal_metadata.is_outlier():
            is_in_room = yield self.auth.check_host_in_room(
                event.room_id,
                self.server_name
            )
        else:
            is_in_room = True
        if not is_in_room:
            logger.info(
                "Got event for room we're not in: %r %r",
                event.room_id, event.event_id
            )

            # Persist the event along with the supplied auth tree/state
            # instead of going through the normal _handle_new_event path.
            # NOTE(review): this branch never assigns `context`, yet the
            # membership handling at the bottom of this method reads
            # `context.get_prev_state_ids(...)` — looks like a latent
            # UnboundLocalError if a member-join event reaches here; confirm.
            try:
                yield self._persist_auth_tree(
                    origin, auth_chain, state, event
                )
            except AuthError as e:
                raise FederationError(
                    "ERROR",
                    e.code,
                    e.msg,
                    affected=event.event_id,
                )

        else:
            # Collect the ids of any state/auth events we were handed, so we
            # can skip ones we've already stored.
            event_ids = set()
            if state:
                event_ids |= {e.event_id for e in state}
            if auth_chain:
                event_ids |= {e.event_id for e in auth_chain}

            seen_ids = yield self.store.have_seen_events(event_ids)

            if state and auth_chain is not None:
                # If we have any state or auth_chain given to us by the replication
                # layer, then we should handle them (if we haven't before.)

                event_infos = []

                for e in itertools.chain(auth_chain, state):
                    if e.event_id in seen_ids:
                        continue
                    # Supplied state/auth events are stored as outliers: we
                    # don't know their full context.
                    e.internal_metadata.outlier = True
                    auth_ids = [e_id for e_id, _ in e.auth_events]
                    # The Create event is always included, since everything
                    # ultimately auths against it.
                    auth = {
                        (e.type, e.state_key): e for e in auth_chain
                        if e.event_id in auth_ids or e.type == EventTypes.Create
                    }
                    event_infos.append({
                        "event": e,
                        "auth_events": auth,
                    })
                    seen_ids.add(e.event_id)

                yield self._handle_new_events(origin, event_infos)

            try:
                context = yield self._handle_new_event(
                    origin,
                    event,
                    state=state,
                )
            except AuthError as e:
                raise FederationError(
                    "ERROR",
                    e.code,
                    e.msg,
                    affected=event.event_id,
                )

        room = yield self.store.get_room(event.room_id)

        if not room:
            # First event we've seen for this room: create a stub row.
            # Failure here is non-fatal; we just log it.
            try:
                yield self.store.store_room(
                    room_id=event.room_id,
                    room_creator_user_id="",
                    is_public=False,
                )
            except StoreError:
                logger.exception("Failed to store room.")

        if event.type == EventTypes.Member:
            if event.membership == Membership.JOIN:
                # Only fire user_joined_room if the user has actually
                # joined the room. Don't bother if the user is just
                # changing their profile info.
                newly_joined = True

                prev_state_ids = yield context.get_prev_state_ids(self.store)

                prev_state_id = prev_state_ids.get(
                    (event.type, event.state_key)
                )
                if prev_state_id:
                    prev_state = yield self.store.get_event(
                        prev_state_id, allow_none=True,
                    )
                    if prev_state and prev_state.membership == Membership.JOIN:
                        # Already joined: this is a profile change, not a join.
                        newly_joined = False

                if newly_joined:
                    user = UserID.from_string(event.state_key)
                    yield self.user_joined_room(user, event.room_id)
|
2014-08-21 07:38:22 -06:00
|
|
|
|
2014-08-19 07:20:03 -06:00
|
|
|
@log_function
|
|
|
|
@defer.inlineCallbacks
|
2016-08-16 04:34:36 -06:00
|
|
|
def backfill(self, dest, room_id, limit, extremities):
|
2014-11-12 09:20:21 -07:00
|
|
|
""" Trigger a backfill request to `dest` for the given `room_id`
|
2016-04-12 05:04:19 -06:00
|
|
|
|
2018-04-17 16:41:36 -06:00
|
|
|
This will attempt to get more events from the remote. If the other side
|
|
|
|
has no new events to offer, this will return an empty list.
|
|
|
|
|
|
|
|
As the events are received, we check their signatures, and also do some
|
|
|
|
sanity-checking on them. If any of the backfilled events are invalid,
|
|
|
|
this method throws a SynapseError.
|
|
|
|
|
|
|
|
TODO: make this more useful to distinguish failures of the remote
|
|
|
|
server from invalid events (there is probably no point in trying to
|
|
|
|
re-fetch invalid events from every other HS in the room.)
|
2014-11-12 09:20:21 -07:00
|
|
|
"""
|
2016-04-05 05:56:29 -06:00
|
|
|
if dest == self.server_name:
|
|
|
|
raise SynapseError(400, "Can't backfill from self.")
|
|
|
|
|
2018-07-31 08:44:05 -06:00
|
|
|
events = yield self.federation_client.backfill(
|
2014-10-31 03:59:02 -06:00
|
|
|
dest,
|
|
|
|
room_id,
|
2015-05-20 04:59:02 -06:00
|
|
|
limit=limit,
|
2014-11-10 04:59:51 -07:00
|
|
|
extremities=extremities,
|
2014-10-31 03:59:02 -06:00
|
|
|
)
|
2014-08-26 12:49:42 -06:00
|
|
|
|
2018-04-20 04:41:03 -06:00
|
|
|
# ideally we'd sanity check the events here for excess prev_events etc,
|
|
|
|
# but it's hard to reject events at this point without completely
|
|
|
|
# breaking backfill in the same way that it is currently broken by
|
|
|
|
# events whose signature we cannot verify (#3121).
|
|
|
|
#
|
|
|
|
# So for now we accept the events anyway. #3124 tracks this.
|
|
|
|
#
|
|
|
|
# for ev in events:
|
|
|
|
# self._sanity_check_event(ev)
|
2018-04-17 16:41:12 -06:00
|
|
|
|
2016-04-12 04:54:41 -06:00
|
|
|
# Don't bother processing events we already have.
|
2016-04-12 04:19:10 -06:00
|
|
|
seen_events = yield self.store.have_events_in_timeline(
|
|
|
|
set(e.event_id for e in events)
|
|
|
|
)
|
|
|
|
|
|
|
|
events = [e for e in events if e.event_id not in seen_events]
|
|
|
|
|
|
|
|
if not events:
|
|
|
|
defer.returnValue([])
|
|
|
|
|
2015-05-20 04:59:02 -06:00
|
|
|
event_map = {e.event_id: e for e in events}
|
2014-08-26 12:49:42 -06:00
|
|
|
|
2015-05-20 04:59:02 -06:00
|
|
|
event_ids = set(e.event_id for e in events)
|
2014-10-15 09:06:59 -06:00
|
|
|
|
2015-05-20 04:59:02 -06:00
|
|
|
edges = [
|
|
|
|
ev.event_id
|
|
|
|
for ev in events
|
|
|
|
if set(e_id for e_id, _ in ev.prev_events) - event_ids
|
|
|
|
]
|
2014-10-15 09:06:59 -06:00
|
|
|
|
2015-06-02 03:28:14 -06:00
|
|
|
logger.info(
|
|
|
|
"backfill: Got %d events with %d edges",
|
|
|
|
len(events), len(edges),
|
|
|
|
)
|
|
|
|
|
2015-05-20 04:59:02 -06:00
|
|
|
# For each edge get the current state.
|
2014-10-15 09:06:59 -06:00
|
|
|
|
2015-05-20 04:59:02 -06:00
|
|
|
auth_events = {}
|
2015-06-02 03:11:32 -06:00
|
|
|
state_events = {}
|
2015-05-20 04:59:02 -06:00
|
|
|
events_to_state = {}
|
|
|
|
for e_id in edges:
|
2018-07-31 08:44:05 -06:00
|
|
|
state, auth = yield self.federation_client.get_state_for_room(
|
2015-05-20 04:59:02 -06:00
|
|
|
destination=dest,
|
|
|
|
room_id=room_id,
|
|
|
|
event_id=e_id
|
|
|
|
)
|
|
|
|
auth_events.update({a.event_id: a for a in auth})
|
2015-06-02 03:58:35 -06:00
|
|
|
auth_events.update({s.event_id: s for s in state})
|
2015-06-02 03:11:32 -06:00
|
|
|
state_events.update({s.event_id: s for s in state})
|
2015-05-20 04:59:02 -06:00
|
|
|
events_to_state[e_id] = state
|
|
|
|
|
2015-06-02 03:58:35 -06:00
|
|
|
required_auth = set(
|
2016-08-05 05:59:04 -06:00
|
|
|
a_id
|
|
|
|
for event in events + state_events.values() + auth_events.values()
|
|
|
|
for a_id, _ in event.auth_events
|
2015-06-02 03:58:35 -06:00
|
|
|
)
|
2016-08-05 05:59:04 -06:00
|
|
|
auth_events.update({
|
|
|
|
e_id: event_map[e_id] for e_id in required_auth if e_id in event_map
|
|
|
|
})
|
2015-06-02 03:58:35 -06:00
|
|
|
missing_auth = required_auth - set(auth_events)
|
2016-08-05 05:59:04 -06:00
|
|
|
failed_to_fetch = set()
|
|
|
|
|
|
|
|
# Try and fetch any missing auth events from both DB and remote servers.
|
|
|
|
# We repeatedly do this until we stop finding new auth events.
|
|
|
|
while missing_auth - failed_to_fetch:
|
2016-06-15 03:58:07 -06:00
|
|
|
logger.info("Missing auth for backfill: %r", missing_auth)
|
2016-08-05 05:59:04 -06:00
|
|
|
ret_events = yield self.store.get_events(missing_auth - failed_to_fetch)
|
|
|
|
auth_events.update(ret_events)
|
|
|
|
|
|
|
|
required_auth.update(
|
|
|
|
a_id for event in ret_events.values() for a_id, _ in event.auth_events
|
|
|
|
)
|
|
|
|
missing_auth = required_auth - set(auth_events)
|
|
|
|
|
|
|
|
if missing_auth - failed_to_fetch:
|
|
|
|
logger.info(
|
|
|
|
"Fetching missing auth for backfill: %r",
|
|
|
|
missing_auth - failed_to_fetch
|
|
|
|
)
|
|
|
|
|
2017-10-06 15:14:24 -06:00
|
|
|
results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
|
2016-08-05 05:59:04 -06:00
|
|
|
[
|
2018-04-27 04:29:27 -06:00
|
|
|
logcontext.run_in_background(
|
2018-07-31 08:44:05 -06:00
|
|
|
self.federation_client.get_pdu,
|
2016-08-05 05:59:04 -06:00
|
|
|
[dest],
|
|
|
|
event_id,
|
|
|
|
outlier=True,
|
|
|
|
timeout=10000,
|
|
|
|
)
|
|
|
|
for event_id in missing_auth - failed_to_fetch
|
|
|
|
],
|
|
|
|
consumeErrors=True
|
2016-08-23 08:23:39 -06:00
|
|
|
)).addErrback(unwrapFirstError)
|
2016-08-24 03:31:05 -06:00
|
|
|
auth_events.update({a.event_id: a for a in results if a})
|
2016-08-05 05:59:04 -06:00
|
|
|
required_auth.update(
|
2016-08-25 03:30:09 -06:00
|
|
|
a_id
|
|
|
|
for event in results if event
|
|
|
|
for a_id, _ in event.auth_events
|
2016-08-05 05:59:04 -06:00
|
|
|
)
|
|
|
|
missing_auth = required_auth - set(auth_events)
|
|
|
|
|
|
|
|
failed_to_fetch = missing_auth - set(auth_events)
|
|
|
|
|
2018-04-17 11:30:53 -06:00
|
|
|
seen_events = yield self.store.have_seen_events(
|
2016-08-05 05:59:04 -06:00
|
|
|
set(auth_events.keys()) | set(state_events.keys())
|
|
|
|
)
|
2015-06-02 03:11:32 -06:00
|
|
|
|
2015-06-25 10:18:19 -06:00
|
|
|
ev_infos = []
|
|
|
|
for a in auth_events.values():
|
|
|
|
if a.event_id in seen_events:
|
|
|
|
continue
|
2016-04-12 04:19:10 -06:00
|
|
|
a.internal_metadata.outlier = True
|
2015-06-25 10:18:19 -06:00
|
|
|
ev_infos.append({
|
|
|
|
"event": a,
|
|
|
|
"auth_events": {
|
|
|
|
(auth_events[a_id].type, auth_events[a_id].state_key):
|
|
|
|
auth_events[a_id]
|
|
|
|
for a_id, _ in a.auth_events
|
2016-08-05 05:59:04 -06:00
|
|
|
if a_id in auth_events
|
2015-06-25 10:18:19 -06:00
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
for e_id in events_to_state:
|
|
|
|
ev_infos.append({
|
|
|
|
"event": event_map[e_id],
|
|
|
|
"state": events_to_state[e_id],
|
|
|
|
"auth_events": {
|
|
|
|
(auth_events[a_id].type, auth_events[a_id].state_key):
|
|
|
|
auth_events[a_id]
|
|
|
|
for a_id, _ in event_map[e_id].auth_events
|
2016-08-05 05:59:04 -06:00
|
|
|
if a_id in auth_events
|
2015-06-25 10:18:19 -06:00
|
|
|
}
|
|
|
|
})
|
2015-05-20 04:59:02 -06:00
|
|
|
|
2016-04-12 04:19:10 -06:00
|
|
|
yield self._handle_new_events(
|
|
|
|
dest, ev_infos,
|
|
|
|
backfilled=True,
|
|
|
|
)
|
|
|
|
|
2015-05-20 04:59:02 -06:00
|
|
|
events.sort(key=lambda e: e.depth)
|
|
|
|
|
|
|
|
for event in events:
|
|
|
|
if event in events_to_state:
|
|
|
|
continue
|
|
|
|
|
2016-04-12 05:48:30 -06:00
|
|
|
# We store these one at a time since each event depends on the
|
|
|
|
# previous to work out the state.
|
|
|
|
# TODO: We can probably do something more clever here.
|
2016-04-12 04:19:10 -06:00
|
|
|
yield self._handle_new_event(
|
2016-06-15 03:58:07 -06:00
|
|
|
dest, event, backfilled=True,
|
2016-04-12 04:19:10 -06:00
|
|
|
)
|
2014-08-26 12:49:42 -06:00
|
|
|
|
|
|
|
defer.returnValue(events)
|
|
|
|
|
2015-05-11 11:01:31 -06:00
|
|
|
@defer.inlineCallbacks
def maybe_backfill(self, room_id, current_depth):
    """Checks the database to see if we should backfill before paginating,
    and if so do.

    Args:
        room_id (str): room to consider backfilling
        current_depth (int): depth the client is paginating back to; we
            only backfill if our oldest extremities are deeper than this.

    Returns:
        Deferred[bool]: True if a backfill succeeded, False otherwise.
    """
    extremities = yield self.store.get_oldest_events_with_depth_in_room(
        room_id
    )

    if not extremities:
        logger.debug("Not backfilling as no extremeties found.")
        return

    # Check if we reached a point where we should start backfilling.
    sorted_extremeties_tuple = sorted(
        extremities.items(),
        key=lambda e: -int(e[1])
    )
    max_depth = sorted_extremeties_tuple[0][1]

    # We don't want to specify too many extremities as it causes the backfill
    # request URI to be too long.
    extremities = dict(sorted_extremeties_tuple[:5])

    if current_depth > max_depth:
        logger.debug(
            "Not backfilling as we don't need to. %d < %d",
            max_depth, current_depth,
        )
        return

    # Now we need to decide which hosts to hit first.

    # First we try hosts that are already in the room
    # TODO: HEURISTIC ALERT.

    curr_state = yield self.state_handler.get_current_state(room_id)

    def get_domains_from_state(state):
        """Get joined domains from state

        Args:
            state (dict[tuple, FrozenEvent]): State map from type/state
                key to event.

        Returns:
            list[tuple[str, int]]: Returns a list of servers with the
            lowest depth of their joins. Sorted by lowest depth first.
        """
        joined_users = [
            (state_key, int(event.depth))
            for (e_type, state_key), event in iteritems(state)
            if e_type == EventTypes.Member
            and event.membership == Membership.JOIN
        ]

        joined_domains = {}
        for u, d in joined_users:
            try:
                dom = get_domain_from_id(u)
                old_d = joined_domains.get(dom)
                if old_d:
                    joined_domains[dom] = min(d, old_d)
                else:
                    joined_domains[dom] = d
            except Exception:
                # malformed user_id: skip it rather than abort the whole scan
                pass

        return sorted(joined_domains.items(), key=lambda d: d[1])

    curr_domains = get_domains_from_state(curr_state)

    likely_domains = [
        domain for domain, depth in curr_domains
        if domain != self.server_name
    ]

    @defer.inlineCallbacks
    def try_backfill(domains):
        # TODO: Should we try multiple of these at a time?
        for dom in domains:
            try:
                yield self.backfill(
                    dom, room_id,
                    limit=100,
                    extremities=extremities,
                )
                # If this succeeded then we probably already have the
                # appropriate stuff.
                # TODO: We can probably do something more intelligent here.
                defer.returnValue(True)
            except SynapseError as e:
                logger.info(
                    "Failed to backfill from %s because %s",
                    dom, e,
                )
                continue
            except CodeMessageException as e:
                if 400 <= e.code < 500:
                    raise

                logger.info(
                    "Failed to backfill from %s because %s",
                    dom, e,
                )
                continue
            except NotRetryingDestination as e:
                # NB: use str(e), not e.message: BaseException.message does
                # not exist on Python 3 and would raise AttributeError here.
                logger.info(str(e))
                continue
            except FederationDeniedError as e:
                logger.info(e)
                continue
            except Exception as e:
                logger.exception(
                    "Failed to backfill from %s because %s",
                    dom, e,
                )
                continue

        defer.returnValue(False)

    success = yield try_backfill(likely_domains)
    if success:
        defer.returnValue(True)

    # Huh, well *those* domains didn't work out. Lets try some domains
    # from the time.

    tried_domains = set(likely_domains)
    tried_domains.add(self.server_name)

    event_ids = list(extremities.keys())

    logger.debug("calling resolve_state_groups in _maybe_backfill")
    resolve = logcontext.preserve_fn(
        self.state_handler.resolve_state_groups_for_events
    )
    states = yield logcontext.make_deferred_yieldable(defer.gatherResults(
        [resolve(room_id, [e]) for e in event_ids],
        consumeErrors=True,
    ))

    # dict[str, dict[tuple, str]], a map from event_id to state map of
    # event_ids.
    states = dict(zip(event_ids, [s.state for s in states]))

    state_map = yield self.store.get_events(
        [e_id for ids in itervalues(states) for e_id in itervalues(ids)],
        get_prev_content=False
    )
    states = {
        key: {
            k: state_map[e_id]
            for k, e_id in iteritems(state_dict)
            if e_id in state_map
        } for key, state_dict in iteritems(states)
    }

    for e_id, _ in sorted_extremeties_tuple:
        likely_domains = get_domains_from_state(states[e_id])

        success = yield try_backfill([
            dom for dom, _ in likely_domains
            if dom not in tried_domains
        ])
        if success:
            defer.returnValue(True)

        tried_domains.update(dom for dom, _ in likely_domains)

    defer.returnValue(False)
|
|
|
|
|
2018-04-17 16:41:12 -06:00
|
|
|
def _sanity_check_event(self, ev):
|
|
|
|
"""
|
|
|
|
Do some early sanity checks of a received event
|
|
|
|
|
|
|
|
In particular, checks it doesn't have an excessive number of
|
|
|
|
prev_events or auth_events, which could cause a huge state resolution
|
|
|
|
or cascade of event fetches.
|
|
|
|
|
|
|
|
Args:
|
|
|
|
ev (synapse.events.EventBase): event to be checked
|
|
|
|
|
|
|
|
Returns: None
|
|
|
|
|
|
|
|
Raises:
|
|
|
|
SynapseError if the event does not pass muster
|
|
|
|
"""
|
|
|
|
if len(ev.prev_events) > 20:
|
|
|
|
logger.warn("Rejecting event %s which has %i prev_events",
|
|
|
|
ev.event_id, len(ev.prev_events))
|
|
|
|
raise SynapseError(
|
2018-04-28 03:46:48 -06:00
|
|
|
http_client.BAD_REQUEST,
|
2018-04-17 16:41:12 -06:00
|
|
|
"Too many prev_events",
|
|
|
|
)
|
|
|
|
|
|
|
|
if len(ev.auth_events) > 10:
|
|
|
|
logger.warn("Rejecting event %s which has %i auth_events",
|
|
|
|
ev.event_id, len(ev.auth_events))
|
|
|
|
raise SynapseError(
|
2018-04-28 03:46:48 -06:00
|
|
|
http_client.BAD_REQUEST,
|
2018-04-17 16:41:12 -06:00
|
|
|
"Too many auth_events",
|
|
|
|
)
|
|
|
|
|
2014-11-07 06:41:00 -07:00
|
|
|
@defer.inlineCallbacks
def send_invite(self, target_host, event):
    """Send the invite to the remote server so it can sign it.

    Invites must be signed by the invitee's server before distribution.
    """
    signed_pdu = yield self.federation_client.send_invite(
        destination=target_host,
        room_id=event.room_id,
        event_id=event.event_id,
        pdu=event,
    )
    defer.returnValue(signed_pdu)
|
2014-11-07 06:41:00 -07:00
|
|
|
|
2014-11-07 08:35:53 -07:00
|
|
|
@defer.inlineCallbacks
def on_event_auth(self, event_id):
    """Return the auth chain for the given event, including the event's
    own auth events."""
    event = yield self.store.get_event(event_id)
    auth_event_ids = [auth_id for auth_id, _ in event.auth_events]
    auth_chain = yield self.store.get_auth_chain(
        auth_event_ids, include_given=True,
    )
    defer.returnValue(list(auth_chain))
|
2014-11-07 08:35:53 -07:00
|
|
|
|
2014-08-20 07:42:36 -06:00
|
|
|
@log_function
@defer.inlineCallbacks
def do_invite_join(self, target_hosts, room_id, joinee, content):
    """ Attempts to join the `joinee` to the room `room_id` via the
    server `target_host`.

    This first triggers a /make_join/ request that returns a partial
    event that we can fill out and sign. This is then sent to the
    remote server via /send_join/ which responds with the state at that
    event and the auth_chains.

    We suspend processing of any received events from this room until we
    have finished processing the join.

    Args:
        target_hosts (list[str]): servers to try the /make_join/ and
            /send_join/ requests against, in order
        room_id (str): room being joined
        joinee (str): user id of the joining user
        content (dict): extra content for the membership event

    Returns:
        Deferred[bool]: True on success
    """
    logger.debug("Joining %s to %s", joinee, room_id)

    origin, event = yield self._make_and_verify_event(
        target_hosts,
        room_id,
        joinee,
        "join",
        content,
    )

    # This shouldn't happen, because the RoomMemberHandler has a
    # linearizer lock which only allows one operation per user per room
    # at a time - so this is just paranoia.
    assert (room_id not in self.room_queues)

    # While the join is in flight, incoming PDUs for this room are parked
    # in this queue and replayed in the `finally` below.
    self.room_queues[room_id] = []

    yield self._clean_room_for_join(room_id)

    # NOTE(review): handled_events is populated below but never read in
    # this method — presumably historical; confirm before removing.
    handled_events = set()

    try:
        event = self._sign_event(event)
        # Try the host we successfully got a response to /make_join/
        # request first.
        try:
            target_hosts.remove(origin)
            target_hosts.insert(0, origin)
        except ValueError:
            pass
        ret = yield self.federation_client.send_join(target_hosts, event)

        origin = ret["origin"]
        state = ret["state"]
        auth_chain = ret["auth_chain"]
        # Persisting in depth order keeps auth dependencies ahead of the
        # events that rely on them.
        auth_chain.sort(key=lambda e: e.depth)

        handled_events.update([s.event_id for s in state])
        handled_events.update([a.event_id for a in auth_chain])
        handled_events.add(event.event_id)

        logger.debug("do_invite_join auth_chain: %s", auth_chain)
        logger.debug("do_invite_join state: %s", state)

        logger.debug("do_invite_join event: %s", event)

        try:
            yield self.store.store_room(
                room_id=room_id,
                room_creator_user_id="",
                is_public=False
            )
        except Exception:
            # FIXME
            pass

        yield self._persist_auth_tree(
            origin, auth_chain, state, event
        )

        logger.debug("Finished joining %s to %s", joinee, room_id)
    finally:
        # Always drain and drop the queue, even on failure, so the room is
        # not left permanently suspended.
        room_queue = self.room_queues[room_id]
        del self.room_queues[room_id]

        # we don't need to wait for the queued events to be processed -
        # it's just a best-effort thing at this point. We do want to do
        # them roughly in order, though, otherwise we'll end up making
        # lots of requests for missing prev_events which we do actually
        # have. Hence we fire off the deferred, but don't wait for it.

        logcontext.run_in_background(self._handle_queued_pdus, room_queue)

    defer.returnValue(True)
|
2014-08-21 07:38:22 -06:00
|
|
|
|
2017-03-14 05:26:57 -06:00
|
|
|
@defer.inlineCallbacks
def _handle_queued_pdus(self, room_queue):
    """Process PDUs which got queued up while we were busy send_joining.

    Args:
        room_queue (list[FrozenEvent, str]): list of PDUs to be processed
            and the servers that sent them
    """
    for pdu, sender in room_queue:
        try:
            logger.info("Processing queued PDU %s which was received "
                        "while we were joining %s", pdu.event_id, pdu.room_id)
            yield self.on_receive_pdu(sender, pdu)
        except Exception as e:
            # Best effort: a bad queued PDU must not stop the rest.
            logger.warn(
                "Error handling queued PDU %s from %s: %s",
                pdu.event_id, sender, e)
|
|
|
|
|
2014-10-16 09:56:51 -06:00
|
|
|
@defer.inlineCallbacks
@log_function
def on_make_join_request(self, room_id, user_id):
    """ We've received a /make_join/ request, so we create a partial
    join event for the room and return that. We do *not* persist or
    process it until the other server has signed it and sent it back.

    Args:
        room_id (str): room the join is happening in
        user_id (str): user id of the joining user

    Returns:
        Deferred[FrozenEvent]: the partial, unsigned join event
    """
    event_content = {"membership": Membership.JOIN}

    builder = self.event_builder_factory.new({
        "type": EventTypes.Member,
        "content": event_content,
        "room_id": room_id,
        "sender": user_id,
        "state_key": user_id,
    })

    try:
        event, context = yield self.event_creation_handler.create_new_client_event(
            builder=builder,
        )
    except AuthError as e:
        # Don't log `event` here: if create_new_client_event raised,
        # `event` was never assigned, and referencing it would mask the
        # real error behind an UnboundLocalError.
        logger.warn("Failed to create join for %s in %s because %s",
                    user_id, room_id, e)
        raise e

    # The remote hasn't signed it yet, obviously. We'll do the full checks
    # when we get the event back in `on_send_join_request`
    yield self.auth.check_from_context(event, context, do_sig_check=False)

    defer.returnValue(event)
|
2014-10-16 09:56:51 -06:00
|
|
|
|
|
|
|
@defer.inlineCallbacks
@log_function
def on_send_join_request(self, origin, pdu):
    """ We have received a join event for a room. Fully process it and
    respond with the current state and auth chains.

    Args:
        origin (str): server the signed join event came from
        pdu (FrozenEvent): the signed join event

    Returns:
        Deferred[dict]: with keys "state" (list of events) and
        "auth_chain" (the auth chain for that state).
    """
    event = pdu

    logger.debug(
        "on_send_join_request: Got event: %s, signatures: %s",
        event.event_id,
        event.signatures,
    )

    # Not an outlier: we are about to process and persist it fully.
    event.internal_metadata.outlier = False
    # Send this event on behalf of the origin server.
    #
    # The reasons we have the destination server rather than the origin
    # server send it are slightly mysterious: the origin server should have
    # all the neccessary state once it gets the response to the send_join,
    # so it could send the event itself if it wanted to. It may be that
    # doing it this way reduces failure modes, or avoids certain attacks
    # where a new server selectively tells a subset of the federation that
    # it has joined.
    #
    # The fact is that, as of the current writing, Synapse doesn't send out
    # the join event over federation after joining, and changing it now
    # would introduce the danger of backwards-compatibility problems.
    event.internal_metadata.send_on_behalf_of = origin

    context = yield self._handle_new_event(
        origin, event
    )

    logger.debug(
        "on_send_join_request: After _handle_new_event: %s, sigs: %s",
        event.event_id,
        event.signatures,
    )

    # Notify the rest of the server that a user joined (presence etc).
    if event.type == EventTypes.Member:
        if event.content["membership"] == Membership.JOIN:
            user = UserID.from_string(event.state_key)
            yield self.user_joined_room(user, event.room_id)

    # The response contains the state *before* the join event, plus its
    # auth chain.
    prev_state_ids = yield context.get_prev_state_ids(self.store)

    state_ids = list(prev_state_ids.values())
    auth_chain = yield self.store.get_auth_chain(state_ids)

    state = yield self.store.get_events(list(prev_state_ids.values()))

    defer.returnValue({
        "state": list(state.values()),
        "auth_chain": auth_chain,
    })
|
2014-10-16 09:56:51 -06:00
|
|
|
|
2014-11-07 06:41:00 -07:00
|
|
|
@defer.inlineCallbacks
def on_invite_request(self, origin, pdu):
    """ We've got an invite event. Process and persist it. Sign it.

    Respond with the now signed event.

    Args:
        origin (str): server the invite came from
        pdu (FrozenEvent): the invite event

    Returns:
        Deferred[FrozenEvent]: the event with our signature added

    Raises:
        SynapseError: if any of the validity/policy checks below fail
    """
    event = pdu

    # --- validity and policy checks; each raises SynapseError on failure ---
    if event.state_key is None:
        raise SynapseError(400, "The invite event did not have a state key")

    is_blocked = yield self.store.is_room_blocked(event.room_id)
    if is_blocked:
        raise SynapseError(403, "This room has been blocked on this server")

    if self.hs.config.block_non_admin_invites:
        raise SynapseError(403, "This server does not accept room invites")

    if not self.spam_checker.user_may_invite(
        event.sender, event.state_key, event.room_id,
    ):
        raise SynapseError(
            403, "This user is not permitted to send invites to this server/user"
        )

    membership = event.content.get("membership")
    if event.type != EventTypes.Member or membership != Membership.INVITE:
        raise SynapseError(400, "The event was not an m.room.member invite event")

    # The invite must come from the inviter's own server.
    sender_domain = get_domain_from_id(event.sender)
    if sender_domain != origin:
        raise SynapseError(400, "The invite event was not from the server sending it")

    # The invitee must be local, or we have no business signing this.
    if not self.is_mine_id(event.state_key):
        raise SynapseError(400, "The invite event must be for this server")

    # block any attempts to invite the server notices mxid
    if event.state_key == self._server_notices_mxid:
        raise SynapseError(
            http_client.FORBIDDEN,
            "Cannot invite this user",
        )

    # Outlier: we have no state for this event (we may not be in the room).
    event.internal_metadata.outlier = True
    event.internal_metadata.invite_from_remote = True

    # Add our signature, as the invitee's server.
    event.signatures.update(
        compute_event_signature(
            event,
            self.hs.hostname,
            self.hs.config.signing_key[0]
        )
    )

    context = yield self.state_handler.compute_event_context(event)
    yield self._persist_events([(event, context)])

    defer.returnValue(event)
|
2014-11-07 06:41:00 -07:00
|
|
|
|
2015-10-20 04:58:58 -06:00
|
|
|
@defer.inlineCallbacks
def do_remotely_reject_invite(self, target_hosts, room_id, user_id):
    """Reject an invite by asking a remote server for a leave event,
    signing it, sending it back via /send_leave/, and persisting it."""
    origin, event = yield self._make_and_verify_event(
        target_hosts, room_id, user_id, "leave",
    )
    # Mark as outlier as we don't have any state for this event; we're not
    # even in the room.
    event.internal_metadata.outlier = True
    event = self._sign_event(event)

    # Try the host that we succesfully called /make_leave/ on first for
    # the /send_leave/ request.
    if origin in target_hosts:
        target_hosts.remove(origin)
        target_hosts.insert(0, origin)

    yield self.federation_client.send_leave(target_hosts, event)

    context = yield self.state_handler.compute_event_context(event)
    yield self._persist_events([(event, context)])

    defer.returnValue(event)
|
2015-10-20 04:58:58 -06:00
|
|
|
|
|
|
|
@defer.inlineCallbacks
def _make_and_verify_event(self, target_hosts, room_id, user_id, membership,
                           content=None,):
    """Ask one of target_hosts to build a membership event for us and
    sanity-check the result.

    Args:
        target_hosts (list[str]): servers to try, in order
        room_id (str): room the membership change is in
        user_id (str): user whose membership is changing
        membership (str): e.g. "join" or "leave"
        content (dict|None): extra content for the event ({} if None)

    Returns:
        Deferred[tuple[str, FrozenEvent]]: the origin that answered, and
        the (unsigned) event it built.
    """
    # Avoid a mutable default argument: a shared module-level dict could
    # leak state between calls if anything downstream mutated it.
    if content is None:
        content = {}

    origin, pdu = yield self.federation_client.make_membership_event(
        target_hosts,
        room_id,
        user_id,
        membership,
        content,
    )

    logger.debug("Got response to make_%s: %s", membership, pdu)

    event = pdu

    # We should assert some things.
    # FIXME: Do this in a nicer way
    assert(event.type == EventTypes.Member)
    assert(event.user_id == user_id)
    assert(event.state_key == user_id)
    assert(event.room_id == room_id)
    defer.returnValue((origin, event))
|
|
|
|
|
|
|
|
def _sign_event(self, event):
    """Rebuild the given event with a local event_id and origin, add our
    hashes and signatures, and return the rebuilt event.
    """
    event.internal_metadata.outlier = False

    # Copy the event into a fresh builder so we can amend it before
    # re-signing; unfreeze because the PDU JSON may be immutable.
    builder = self.event_builder_factory.new(
        unfreeze(event.get_pdu_json())
    )

    builder.event_id = self.event_builder_factory.create_event_id()
    builder.origin = self.hs.hostname

    if not hasattr(event, "signatures"):
        builder.signatures = {}

    # Sign with this homeserver's key.
    add_hashes_and_signatures(
        builder,
        self.hs.hostname,
        self.hs.config.signing_key[0],
    )

    return builder.build()
|
|
|
|
|
|
|
|
@defer.inlineCallbacks
@log_function
def on_make_leave_request(self, room_id, user_id):
    """ We've received a /make_leave/ request, so we create a partial
    leave event for the room and return that. We do *not* persist or
    process it until the other server has signed it and sent it back.

    Args:
        room_id (str): room the leave is happening in
        user_id (str): user id of the leaving user

    Returns:
        Deferred[FrozenEvent]: the partial, unsigned leave event
    """
    builder = self.event_builder_factory.new({
        "type": EventTypes.Member,
        "content": {"membership": Membership.LEAVE},
        "room_id": room_id,
        "sender": user_id,
        "state_key": user_id,
    })

    event, context = yield self.event_creation_handler.create_new_client_event(
        builder=builder,
    )

    try:
        # The remote hasn't signed it yet, obviously. We'll do the full checks
        # when we get the event back in `on_send_leave_request`
        yield self.auth.check_from_context(event, context, do_sig_check=False)
    except AuthError as e:
        logger.warn("Failed to create new leave %r because %s", event, e)
        raise e

    defer.returnValue(event)
|
|
|
|
|
|
|
|
@defer.inlineCallbacks
@log_function
def on_send_leave_request(self, origin, pdu):
    """ We have received a leave event for a room. Fully process it."""
    event = pdu

    logger.debug(
        "on_send_leave_request: Got event: %s, signatures: %s",
        event.event_id, event.signatures,
    )

    # No longer an outlier: we are about to process it fully.
    event.internal_metadata.outlier = False

    yield self._handle_new_event(origin, event)

    logger.debug(
        "on_send_leave_request: After _handle_new_event: %s, sigs: %s",
        event.event_id, event.signatures,
    )

    defer.returnValue(None)
|
|
|
|
|
2014-10-17 08:04:17 -06:00
|
|
|
@defer.inlineCallbacks
def get_state_for_pdu(self, room_id, event_id):
    """Returns the state at the event. i.e. not including said event.

    Args:
        room_id (str): room the event is in (checked against the event)
        event_id (str): event to get the state at

    Returns:
        Deferred[list[FrozenEvent]]: the state events, or [] if we have
        no state group for the event.
    """

    # check_room_id guards against requesting an event from another room.
    event = yield self.store.get_event(
        event_id, allow_none=False, check_room_id=room_id,
    )

    state_groups = yield self.store.get_state_groups(
        room_id, [event_id]
    )

    if state_groups:
        # We asked for one event, so there is exactly one group here.
        _, state = list(iteritems(state_groups)).pop()
        results = {
            (e.type, e.state_key): e for e in state
        }

        # The stored group includes the event itself if it is a state
        # event, so roll it back to the state *before* the event.
        if event.is_state():
            # Get previous state
            if "replaces_state" in event.unsigned:
                prev_id = event.unsigned["replaces_state"]
                if prev_id != event.event_id:
                    prev_event = yield self.store.get_event(prev_id)
                    results[(event.type, event.state_key)] = prev_event
            else:
                # No predecessor: the key simply didn't exist before.
                del results[(event.type, event.state_key)]

        res = list(results.values())
        defer.returnValue(res)
    else:
        defer.returnValue([])
|
|
|
|
|
2016-09-02 03:49:43 -06:00
|
|
|
@defer.inlineCallbacks
def get_state_ids_for_pdu(self, room_id, event_id):
    """Returns the state at the event. i.e. not including said event.

    Args:
        room_id (str): room the event is in (checked against the event)
        event_id (str): event to get the state at

    Returns:
        Deferred[list[str]]: the state event ids, or [] if we have no
        state group for the event.
    """
    event = yield self.store.get_event(
        event_id, allow_none=False, check_room_id=room_id,
    )

    state_groups = yield self.store.get_state_groups_ids(
        room_id, [event_id]
    )

    if state_groups:
        # NB: dict views have no .pop() on Python 3, so materialise the
        # items first (mirrors the fix already made in get_state_for_pdu).
        _, state = list(iteritems(state_groups)).pop()
        results = state

        # The stored group includes the event itself if it is a state
        # event, so roll it back to the state *before* the event.
        if event.is_state():
            # Get previous state
            if "replaces_state" in event.unsigned:
                prev_id = event.unsigned["replaces_state"]
                if prev_id != event.event_id:
                    results[(event.type, event.state_key)] = prev_id
            else:
                results.pop((event.type, event.state_key), None)

        defer.returnValue(list(results.values()))
    else:
        defer.returnValue([])
|
|
|
|
|
2014-10-31 03:59:02 -06:00
|
|
|
@defer.inlineCallbacks
@log_function
def on_backfill_request(self, origin, room_id, pdu_list, limit):
    """Handle a federation backfill request: return up to `limit` events
    preceding `pdu_list`, filtered for what `origin` may see."""
    # Only servers currently in the room may backfill it.
    in_room = yield self.auth.check_host_in_room(room_id, origin)
    if not in_room:
        raise AuthError(403, "Host not in room.")

    events = yield self.store.get_backfill_events(room_id, pdu_list, limit)

    # Strip out anything the requesting server shouldn't see.
    events = yield filter_events_for_server(self.store, origin, events)

    defer.returnValue(events)
|
2014-10-31 03:59:02 -06:00
|
|
|
|
2014-10-31 04:47:34 -06:00
|
|
|
@defer.inlineCallbacks
@log_function
def get_persisted_pdu(self, origin, event_id):
    """Get an event from the database for the given server.

    Args:
        origin [str]: hostname of server which is requesting the event; we
           will check that the server is allowed to see it.
        event_id [str]: id of the event being requested

    Returns:
        Deferred[EventBase|None]: None if we know nothing about the event;
           otherwise the (possibly-redacted) event.

    Raises:
        AuthError if the server is not currently in the room
    """
    event = yield self.store.get_event(
        event_id,
        allow_none=True,
        allow_rejected=True,
    )

    if event is None:
        defer.returnValue(None)

    # Guard clause style: unknown event handled above, so `event` is real.
    in_room = yield self.auth.check_host_in_room(event.room_id, origin)
    if not in_room:
        raise AuthError(403, "Host not in room.")

    filtered = yield filter_events_for_server(
        self.store, origin, [event],
    )
    defer.returnValue(filtered[0])
|
|
|
|
|
|
|
|
@log_function
def get_min_depth_for_context(self, context):
    # Thin wrapper: delegates straight to the store, returning whatever
    # it yields as the minimum depth known for the given room/context.
    return self.store.get_min_depth(context)
|
|
|
|
|
2014-11-25 04:31:18 -07:00
|
|
|
@defer.inlineCallbacks
@log_function
def _handle_new_event(self, origin, event, state=None, auth_events=None,
                      backfilled=False):
    """Prepare (auth/state), generate push actions for, and persist a
    single event received over federation.

    Args:
        origin (str): server the event came from
        event (FrozenEvent): event to process
        state: optional state to use when preparing the event
        auth_events: optional auth events to use when preparing the event
        backfilled (bool): True if this is a backfilled event (skips push
            action generation)

    Returns:
        Deferred[EventContext]: the context the event was persisted with
    """
    context = yield self._prep_event(
        origin, event,
        state=state,
        auth_events=auth_events,
    )

    try:
        # Outliers and backfilled events shouldn't generate notifications.
        if not event.internal_metadata.is_outlier() and not backfilled:
            yield self.action_generator.handle_push_actions_for_event(
                event, context
            )

        yield self._persist_events(
            [(event, context)],
            backfilled=backfilled,
        )
    except:  # noqa: E722, as we reraise the exception this is fine.
        # Capture the original exception before the cleanup call, so the
        # reraise below preserves the original traceback.
        tp, value, tb = sys.exc_info()

        # Persisting failed: roll back the staged push actions so they
        # don't leak (fire-and-forget; best effort).
        logcontext.run_in_background(
            self.store.remove_push_actions_from_staging,
            event.event_id,
        )

        six.reraise(tp, value, tb)

    defer.returnValue(context)
|
2015-06-25 10:18:19 -06:00
|
|
|
|
|
|
|
@defer.inlineCallbacks
def _handle_new_events(self, origin, event_infos, backfilled=False):
    """Creates the appropriate contexts and persists events. The events
    should not depend on one another, e.g. this should be used to persist
    a bunch of outliers, but not a chunk of individual events that depend
    on each other for state calculations.

    Notifies about the events where appropriate.

    Args:
        origin (str): server the events came from.
        event_infos (list[dict]): dicts with an "event" key and optional
            "state" and "auth_events" keys, forwarded to `_prep_event`.
        backfilled (bool): passed through to `_persist_events`.
    """
    # Prep all events concurrently; consumeErrors=True so a single
    # failure doesn't leave unhandled-error noise from the others.
    contexts = yield logcontext.make_deferred_yieldable(defer.gatherResults(
        [
            logcontext.run_in_background(
                self._prep_event,
                origin,
                ev_info["event"],
                state=ev_info.get("state"),
                auth_events=ev_info.get("auth_events"),
            )
            for ev_info in event_infos
        ], consumeErrors=True,
    ))

    # gatherResults preserves order, so contexts line up with
    # event_infos here.
    yield self._persist_events(
        [
            (ev_info["event"], context)
            for ev_info, context in zip(event_infos, contexts)
        ],
        backfilled=backfilled,
    )
|
|
|
|
|
2015-10-06 08:58:21 -06:00
|
|
|
@defer.inlineCallbacks
def _persist_auth_tree(self, origin, auth_events, state, event):
    """Checks the auth chain is valid (and passes auth checks) for the
    state and event. Then persists the auth chain and state atomically.
    Persists the event separately. Notifies about the persisted events
    where appropriate.

    Will attempt to fetch missing auth events.

    Args:
        origin (str): Where the events came from
        auth_events (list)
        state (list)
        event (Event)

    Returns:
        Deferred
    """
    # Compute (outlier) contexts for every auth/state event up front.
    events_to_context = {}
    for e in itertools.chain(auth_events, state):
        e.internal_metadata.outlier = True
        ctx = yield self.state_handler.compute_event_context(e)
        events_to_context[e.event_id] = ctx

    event_map = {
        e.event_id: e
        for e in itertools.chain(auth_events, state, [event])
    }

    # Find the room's create event, if we were given it, so it can be
    # force-included in every event's auth set below.
    create_event = None
    for e in auth_events:
        if (e.type, e.state_key) == (EventTypes.Create, ""):
            create_event = e
            break

    # Collect auth-event ids referenced by any of the events but not
    # present in what we were given.
    missing_auth_events = set()
    for e in itertools.chain(auth_events, state, [event]):
        for e_id, _ in e.auth_events:
            if e_id not in event_map:
                missing_auth_events.add(e_id)

    # Try to fetch each missing auth event from the origin server.
    for e_id in missing_auth_events:
        m_ev = yield self.federation_client.get_pdu(
            [origin],
            e_id,
            outlier=True,
            timeout=10000,
        )
        if m_ev and m_ev.event_id == e_id:
            event_map[e_id] = m_ev
        else:
            logger.info("Failed to find auth event %r", e_id)

    # Auth-check every event against the auth events we have; mark
    # failures rejected (or abort entirely if `event` itself fails).
    for e in itertools.chain(auth_events, state, [event]):
        auth_for_e = {
            (event_map[e_id].type, event_map[e_id].state_key): event_map[e_id]
            for e_id, _ in e.auth_events
            if e_id in event_map
        }
        if create_event:
            auth_for_e[(EventTypes.Create, "")] = create_event

        try:
            self.auth.check(e, auth_events=auth_for_e)
        except SynapseError as err:
            # we may get SynapseErrors here as well as AuthErrors. For
            # instance, there are a couple of (ancient) events in some
            # rooms whose senders do not have the correct sigil; these
            # cause SynapseErrors in auth.check. We don't want to give up
            # the attempt to federate altogether in such cases.

            logger.warn(
                "Rejecting %s because %s",
                e.event_id, err.msg
            )

            if e == event:
                raise
            events_to_context[e.event_id].rejected = RejectedReason.AUTH_ERROR

    # Persist the auth chain and state first ...
    yield self._persist_events(
        [
            (e, events_to_context[e.event_id])
            for e in itertools.chain(auth_events, state)
        ],
    )

    # ... then compute the event's context from the now-persisted state
    # and persist the event itself.
    new_event_context = yield self.state_handler.compute_event_context(
        event, old_state=state
    )

    yield self._persist_events(
        [(event, new_event_context)],
    )
|
|
|
|
|
2015-06-25 10:18:19 -06:00
|
|
|
@defer.inlineCallbacks
def _prep_event(self, origin, event, state=None, auth_events=None):
    """Compute an event context for `event` and run auth checks on it,
    marking the context rejected rather than raising when auth fails.

    Args:
        origin (str): server the event came from; forwarded to `do_auth`.
        event: the event to prepare.
        state: optional old state passed to `compute_event_context`.
        auth_events (dict[(str, str)->Event]|None): auth events to verify
            against; computed from the context's prev state when absent.

    Returns:
        Deferred, which resolves to synapse.events.snapshot.EventContext
    """
    context = yield self.state_handler.compute_event_context(
        event, old_state=state,
    )

    if not auth_events:
        # No auth events supplied: derive them from the state before
        # the event.
        prev_state_ids = yield context.get_prev_state_ids(self.store)
        auth_events_ids = yield self.auth.compute_auth_events(
            event, prev_state_ids, for_verification=True,
        )
        auth_events = yield self.store.get_events(auth_events_ids)
        auth_events = {
            (e.type, e.state_key): e for e in auth_events.values()
        }

    # This is a hack to fix some old rooms where the initial join event
    # didn't reference the create event in its auth events.
    if event.type == EventTypes.Member and not event.auth_events:
        if len(event.prev_events) == 1 and event.depth < 5:
            c = yield self.store.get_event(
                event.prev_events[0][0],
                allow_none=True,
            )
            if c and c.type == EventTypes.Create:
                auth_events[(c.type, c.state_key)] = c

    try:
        yield self.do_auth(
            origin, event, context, auth_events=auth_events
        )
    except AuthError as e:
        # An auth failure marks the context rejected; we deliberately do
        # not re-raise here so the caller still gets a context back.
        logger.warn(
            "Rejecting %s because %s",
            event.event_id, e.msg
        )

        context.rejected = RejectedReason.AUTH_ERROR

    if event.type == EventTypes.GuestAccess and not context.rejected:
        yield self.maybe_kick_guest_users(event)

    defer.returnValue(context)
|
2015-01-29 09:50:23 -07:00
|
|
|
|
|
|
|
@defer.inlineCallbacks
def on_query_auth(self, origin, event_id, room_id, remote_auth_chain, rejects,
                  missing):
    """Handle a federation auth query: process the remote server's auth
    chain for `event_id` and return the difference from our local one.

    Args:
        origin (str): the requesting server; must be in the room.
        event_id (str): the event whose auth chain is being queried.
        room_id (str): room the event must belong to.
        remote_auth_chain (list): auth events as seen by the remote.
        rejects: unused here (see the comment about `missing` below).
        missing: unused here; we recompute rather than trust it.

    Returns:
        Deferred: resolves to the dict produced by
        `construct_auth_difference`.
    """
    in_room = yield self.auth.check_host_in_room(
        room_id,
        origin
    )
    if not in_room:
        raise AuthError(403, "Host not in room.")

    # check_room_id ensures the event actually belongs to room_id.
    event = yield self.store.get_event(
        event_id, allow_none=False, check_room_id=room_id
    )

    # Just go through and process each event in `remote_auth_chain`. We
    # don't want to fall into the trap of `missing` being wrong.
    for e in remote_auth_chain:
        try:
            yield self._handle_new_event(origin, e)
        except AuthError:
            pass

    # Now get the current auth_chain for the event.
    local_auth_chain = yield self.store.get_auth_chain(
        [auth_id for auth_id, _ in event.auth_events],
        include_given=True
    )

    # TODO: Check if we would now reject event_id. If so we need to tell
    # everyone.

    ret = yield self.construct_auth_difference(
        local_auth_chain, remote_auth_chain
    )

    logger.debug("on_query_auth returning: %s", ret)

    defer.returnValue(ret)
|
2015-01-28 09:16:53 -07:00
|
|
|
|
2015-02-23 06:58:02 -07:00
|
|
|
@defer.inlineCallbacks
def on_get_missing_events(self, origin, room_id, earliest_events,
                          latest_events, limit, min_depth):
    """Handle a federation get_missing_events request.

    Returns events in `room_id` between `earliest_events` and
    `latest_events` (clamped to at most 20 events and non-negative
    depth), filtered to what `origin` is allowed to see.

    Raises:
        AuthError: if `origin` is not in the room.
    """
    # The requesting server must be in the room to see its events.
    is_member = yield self.auth.check_host_in_room(
        room_id,
        origin
    )
    if not is_member:
        raise AuthError(403, "Host not in room.")

    # Clamp the caller-supplied bounds to sane values.
    capped_limit = min(limit, 20)
    floored_depth = max(min_depth, 0)

    events = yield self.store.get_missing_events(
        room_id=room_id,
        earliest_events=earliest_events,
        latest_events=latest_events,
        limit=capped_limit,
        min_depth=floored_depth,
    )

    # Strip out anything the requesting server shouldn't see.
    events = yield filter_events_for_server(
        self.store, origin, events,
    )

    defer.returnValue(events)
|
|
|
|
|
2015-01-28 09:16:53 -07:00
|
|
|
@defer.inlineCallbacks
@log_function
def do_auth(self, origin, event, context, auth_events):
    """Run auth checks for `event`, fetching missing auth events from
    `origin` and resolving auth conflicts where possible.

    Args:
        origin (str): server to fetch missing auth events from.
        event (synapse.events.FrozenEvent): event being authed.
        context (synapse.events.snapshot.EventContext): the event's
            context; updated in place after auth resolution.
        auth_events (dict[(str, str)->synapse.events.FrozenEvent]):
            auth events keyed by (type, state_key); mutated in place as
            resolution proceeds.

    Returns:
        defer.Deferred[None]

    Raises:
        AuthError: if the event still fails auth after resolution.
    """
    # Check if we have all the auth events.
    current_state = set(e.event_id for e in auth_events.values())
    event_auth_events = set(e_id for e_id, _ in event.auth_events)

    if event.is_state():
        event_key = (event.type, event.state_key)
    else:
        event_key = None

    if event_auth_events - current_state:
        # TODO: can we use store.have_seen_events here instead?
        have_events = yield self.store.get_seen_events_with_rejections(
            event_auth_events - current_state
        )
    else:
        have_events = {}

    have_events.update({
        e.event_id: ""
        for e in auth_events.values()
    })

    seen_events = set(have_events.keys())

    missing_auth = event_auth_events - seen_events - current_state

    if missing_auth:
        logger.info("Missing auth: %s", missing_auth)
        # If we don't have all the auth events, we need to get them.
        try:
            remote_auth_chain = yield self.federation_client.get_event_auth(
                origin, event.room_id, event.event_id
            )

            seen_remotes = yield self.store.have_seen_events(
                [e.event_id for e in remote_auth_chain]
            )

            for e in remote_auth_chain:
                if e.event_id in seen_remotes:
                    continue

                if e.event_id == event.event_id:
                    continue

                try:
                    auth_ids = [e_id for e_id, _ in e.auth_events]
                    auth = {
                        (e.type, e.state_key): e for e in remote_auth_chain
                        if e.event_id in auth_ids or e.type == EventTypes.Create
                    }
                    e.internal_metadata.outlier = True

                    logger.debug(
                        "do_auth %s missing_auth: %s",
                        event.event_id, e.event_id
                    )
                    yield self._handle_new_event(
                        origin, e, auth_events=auth
                    )

                    if e.event_id in event_auth_events:
                        auth_events[(e.type, e.state_key)] = e
                except AuthError:
                    pass

            have_events = yield self.store.get_seen_events_with_rejections(
                [e_id for e_id, _ in event.auth_events]
            )
            seen_events = set(have_events.keys())
        except Exception:
            # FIXME:
            logger.exception("Failed to get auth chain")

    # FIXME: Assumes we have and stored all the state for all the
    # prev_events
    current_state = set(e.event_id for e in auth_events.values())
    different_auth = event_auth_events - current_state

    if different_auth and not event.internal_metadata.is_outlier():
        # Do auth conflict res.
        logger.info("Different auth: %s", different_auth)

        # Only fetch events we know about and haven't rejected.
        different_events = yield logcontext.make_deferred_yieldable(
            defer.gatherResults([
                logcontext.run_in_background(
                    self.store.get_event,
                    d,
                    allow_none=True,
                    allow_rejected=False,
                )
                for d in different_auth
                if d in have_events and not have_events[d]
            ], consumeErrors=True)
        ).addErrback(unwrapFirstError)

        if different_events:
            local_view = dict(auth_events)
            remote_view = dict(auth_events)
            remote_view.update({
                (d.type, d.state_key): d for d in different_events if d
            })

            new_state = self.state_handler.resolve_events(
                [list(local_view.values()), list(remote_view.values())],
                event
            )

            auth_events.update(new_state)

            current_state = set(e.event_id for e in auth_events.values())
            different_auth = event_auth_events - current_state

        yield self._update_context_for_auth_events(
            event, context, auth_events, event_key,
        )

    if different_auth and not event.internal_metadata.is_outlier():
        logger.info("Different auth after resolution: %s", different_auth)

        # Only do auth resolution if we have something new to say.
        # We can't prove an auth failure.
        do_resolution = False

        provable = [
            # NOTE(review): the original listed NOT_ANCESTOR twice;
            # deduplicated (membership semantics are unchanged). It is
            # possible RejectedReason.REPLACED was intended as the
            # second entry - confirm against the rejection handling.
            RejectedReason.NOT_ANCESTOR,
        ]

        for e_id in different_auth:
            if e_id in have_events:
                if have_events[e_id] in provable:
                    do_resolution = True
                    break

        if do_resolution:
            prev_state_ids = yield context.get_prev_state_ids(self.store)
            # 1. Get what we think is the auth chain.
            auth_ids = yield self.auth.compute_auth_events(
                event, prev_state_ids
            )
            local_auth_chain = yield self.store.get_auth_chain(
                auth_ids, include_given=True
            )

            try:
                # 2. Get remote difference.
                result = yield self.federation_client.query_auth(
                    origin,
                    event.room_id,
                    event.event_id,
                    local_auth_chain,
                )

                seen_remotes = yield self.store.have_seen_events(
                    [e.event_id for e in result["auth_chain"]]
                )

                # 3. Process any remote auth chain events we haven't seen.
                for ev in result["auth_chain"]:
                    if ev.event_id in seen_remotes:
                        continue

                    if ev.event_id == event.event_id:
                        continue

                    try:
                        auth_ids = [e_id for e_id, _ in ev.auth_events]
                        auth = {
                            (e.type, e.state_key): e
                            for e in result["auth_chain"]
                            if e.event_id in auth_ids
                            # was `event.type == EventTypes.Create`, which
                            # compared the outer event and so never included
                            # the create event; match the missing_auth
                            # branch above.
                            or e.type == EventTypes.Create
                        }
                        ev.internal_metadata.outlier = True

                        logger.debug(
                            "do_auth %s different_auth: %s",
                            # was `e.event_id`: a stale/undefined name from
                            # an earlier loop; log the event being handled.
                            event.event_id, ev.event_id
                        )

                        yield self._handle_new_event(
                            origin, ev, auth_events=auth
                        )

                        if ev.event_id in event_auth_events:
                            auth_events[(ev.type, ev.state_key)] = ev
                    except AuthError:
                        pass

            except Exception:
                # FIXME:
                logger.exception("Failed to query auth chain")

            # 4. Look at rejects and their proofs.
            # TODO.

            yield self._update_context_for_auth_events(
                event, context, auth_events, event_key,
            )

    try:
        self.auth.check(event, auth_events=auth_events)
    except AuthError as e:
        logger.warn("Failed auth resolution for %r because %s", event, e)
        raise e
|
2015-01-28 09:16:53 -07:00
|
|
|
|
2018-02-06 07:31:24 -07:00
|
|
|
@defer.inlineCallbacks
def _update_context_for_auth_events(self, event, context, auth_events,
                                    event_key):
    """Update the state_ids in an event context after auth event resolution,
    storing the changes as a new state group.

    Args:
        event (Event): The event we're handling the context for

        context (synapse.events.snapshot.EventContext): event context
            to be updated

        auth_events (dict[(str, str)->str]): Events to update in the event
            context.

        event_key ((str, str)): (type, state_key) for the current event.
            this will not be included in the current_state in the context.
    """
    # Deltas to apply: every resolved auth event except the one keyed by
    # `event_key` (the event currently being handled).
    state_updates = {}
    for key, auth_ev in iteritems(auth_events):
        if key != event_key:
            state_updates[key] = auth_ev.event_id

    existing_current = yield context.get_current_state_ids(self.store)
    current_state_ids = dict(existing_current)
    current_state_ids.update(state_updates)

    # The prev state, by contrast, takes every resolved auth event,
    # including the one under event_key.
    existing_prev = yield context.get_prev_state_ids(self.store)
    prev_state_ids = dict(existing_prev)
    for key, auth_ev in iteritems(auth_events):
        prev_state_ids[key] = auth_ev.event_id

    # create a new state group as a delta from the existing one.
    prev_group = context.state_group
    state_group = yield self.store.store_state_group(
        event.event_id,
        event.room_id,
        prev_group=prev_group,
        delta_ids=state_updates,
        current_state_ids=current_state_ids,
    )

    yield context.update_state(
        state_group=state_group,
        current_state_ids=current_state_ids,
        prev_state_ids=prev_state_ids,
        prev_group=prev_group,
        delta_ids=state_updates,
    )
|
2017-11-07 09:43:00 -07:00
|
|
|
|
2015-01-28 09:16:53 -07:00
|
|
|
@defer.inlineCallbacks
def construct_auth_difference(self, local_auth, remote_auth):
    """ Given a local and remote auth chain, find the differences. This
    assumes that we have already processed all events in remote_auth

    Params:
        local_auth (list)
        remote_auth (list)

    Returns:
        dict: with keys "auth_chain" (our local chain), "rejects"
        (event_id -> reason/proof for remote events we rejected) and
        "missing" (event ids the remote side appears to be missing).
    """

    logger.debug("construct_auth_difference Start!")

    # TODO: Make sure we are OK with local_auth or remote_auth having more
    # auth events in them than strictly necessary.

    def sort_fun(ev):
        return ev.depth, ev.event_id

    logger.debug("construct_auth_difference after sort_fun!")

    # We find the differences by starting at the "bottom" of each list
    # and iterating up on both lists. The lists are ordered by depth and
    # then event_id, we iterate up both lists until we find the event ids
    # don't match. Then we look at depth/event_id to see which side is
    # missing that event, and iterate only up that list. Repeat.

    remote_list = list(remote_auth)
    remote_list.sort(key=sort_fun)

    local_list = list(local_auth)
    local_list.sort(key=sort_fun)

    local_iter = iter(local_list)
    remote_iter = iter(remote_list)

    logger.debug("construct_auth_difference before get_next!")

    def get_next(it, opt=None):
        # Return the next item, or `opt` when the iterator is exhausted.
        # (Narrowed from a blanket `except Exception`: only exhaustion
        # should be swallowed here.)
        try:
            return next(it)
        except StopIteration:
            return opt

    current_local = get_next(local_iter)
    current_remote = get_next(remote_iter)

    logger.debug("construct_auth_difference before while")

    missing_remotes = []
    missing_locals = []
    while current_local or current_remote:
        if current_remote is None:
            missing_locals.append(current_local)
            current_local = get_next(local_iter)
            continue

        if current_local is None:
            missing_remotes.append(current_remote)
            current_remote = get_next(remote_iter)
            continue

        if current_local.event_id == current_remote.event_id:
            current_local = get_next(local_iter)
            current_remote = get_next(remote_iter)
            continue

        if current_local.depth < current_remote.depth:
            missing_locals.append(current_local)
            current_local = get_next(local_iter)
            continue

        if current_local.depth > current_remote.depth:
            missing_remotes.append(current_remote)
            current_remote = get_next(remote_iter)
            continue

        # They have the same depth, so we fall back to the event_id order
        if current_local.event_id < current_remote.event_id:
            missing_locals.append(current_local)
            current_local = get_next(local_iter)
            # This `continue` was missing: without it we fell through to
            # the comparison below with the freshly-advanced (possibly
            # None) current_local, risking an AttributeError.
            continue

        if current_local.event_id > current_remote.event_id:
            missing_remotes.append(current_remote)
            current_remote = get_next(remote_iter)
            continue

    logger.debug("construct_auth_difference after while")

    # missing locals should be sent to the server
    # We should find why we are missing remotes, as they will have been
    # rejected.

    # Remove events from missing_remotes if they are referencing a missing
    # remote. We only care about the "root" rejected ones.
    missing_remote_ids = [e.event_id for e in missing_remotes]
    base_remote_rejected = list(missing_remotes)
    for e in missing_remotes:
        for e_id, _ in e.auth_events:
            if e_id in missing_remote_ids:
                try:
                    base_remote_rejected.remove(e)
                except ValueError:
                    pass

    reason_map = {}

    for e in base_remote_rejected:
        reason = yield self.store.get_rejection_reason(e.event_id)
        if reason is None:
            # TODO: e is not in the current state, so we should
            # construct some proof of that.
            continue

        reason_map[e.event_id] = reason

        if reason == RejectedReason.AUTH_ERROR:
            pass
        elif reason == RejectedReason.REPLACED:
            # TODO: Get proof
            pass
        elif reason == RejectedReason.NOT_ANCESTOR:
            # TODO: Get proof.
            pass

    logger.debug("construct_auth_difference returning")

    defer.returnValue({
        "auth_chain": local_auth,
        "rejects": {
            e.event_id: {
                "reason": reason_map[e.event_id],
                "proof": None,
            }
            for e in base_remote_rejected
        },
        "missing": [e.event_id for e in missing_locals],
    })
|
2015-11-05 09:43:19 -07:00
|
|
|
|
|
|
|
@defer.inlineCallbacks
@log_function
def exchange_third_party_invite(
        self,
        sender_user_id,
        target_user_id,
        room_id,
        signed,
):
    """Turn a signed third-party-invite payload into an m.room.member
    invite from `sender_user_id` to `target_user_id`.

    If this homeserver is in the room, the invite event is created,
    auth- and signature-checked, and sent locally. Otherwise the request
    is forwarded over federation to the servers derived from the sender
    and the room id.
    """
    third_party_invite = {
        "signed": signed,
    }

    event_dict = {
        "type": EventTypes.Member,
        "content": {
            "membership": Membership.INVITE,
            "third_party_invite": third_party_invite,
        },
        "room_id": room_id,
        "sender": sender_user_id,
        "state_key": target_user_id,
    }

    if (yield self.auth.check_host_in_room(room_id, self.hs.hostname)):
        builder = self.event_builder_factory.new(event_dict)
        EventValidator().validate_new(builder)
        event, context = yield self.event_creation_handler.create_new_client_event(
            builder=builder
        )

        # Copy the display name from the original 3pid invite into the
        # membership event (rebuilds event and context).
        event, context = yield self.add_display_name_to_third_party_invite(
            event_dict, event, context
        )

        try:
            yield self.auth.check_from_context(event, context)
        except AuthError as e:
            logger.warn("Denying new third party invite %r because %s", event, e)
            raise e

        yield self._check_signature(event, context)
        member_handler = self.hs.get_room_member_handler()
        yield member_handler.send_membership_event(None, event, context)
    else:
        # We're not in the room: forward to the domains of the sender
        # and the room (the part after the first ":").
        destinations = set(x.split(":", 1)[-1] for x in (sender_user_id, room_id))
        yield self.federation_client.forward_third_party_invite(
            destinations,
            room_id,
            event_dict,
        )
|
|
|
|
|
|
|
|
@defer.inlineCallbacks
@log_function
def on_exchange_third_party_invite_request(self, origin, room_id, event_dict):
    """Handle an exchange_third_party_invite request from a remote server

    The remote server will call this when it wants to turn a 3pid invite
    into a normal m.room.member invite.

    Args:
        origin (str): the remote server making the request.
        room_id (str): room the invite is for (unused directly here;
            the room comes from event_dict).
        event_dict (dict): the membership event to build.

    Returns:
        Deferred: resolves (to None)
    """
    builder = self.event_builder_factory.new(event_dict)

    event, context = yield self.event_creation_handler.create_new_client_event(
        builder=builder,
    )

    event, context = yield self.add_display_name_to_third_party_invite(
        event_dict, event, context
    )

    try:
        # This was missing `yield`: check_from_context returns a
        # Deferred, so without it the auth check's result was dropped
        # and an AuthError could never be caught here (the sibling
        # exchange_third_party_invite yields this call).
        yield self.auth.check_from_context(event, context)
    except AuthError as e:
        logger.warn("Denying third party invite %r because %s", event, e)
        raise e
    yield self._check_signature(event, context)

    # XXX we send the invite here, but send_membership_event also sends it,
    # so we end up making two requests. I think this is redundant.
    returned_invite = yield self.send_invite(origin, event)
    # TODO: Make sure the signatures actually are correct.
    event.signatures.update(returned_invite.signatures)

    member_handler = self.hs.get_room_member_handler()
    yield member_handler.send_membership_event(None, event, context)
|
2015-11-05 09:43:19 -07:00
|
|
|
|
2015-12-17 10:31:20 -07:00
|
|
|
@defer.inlineCallbacks
def add_display_name_to_third_party_invite(self, event_dict, event, context):
    """Copy the display_name from the original m.room.third_party_invite
    event (looked up by its token in the prev state) into `event_dict`,
    then rebuild the event and context from the updated dict.

    Returns:
        Deferred[(event, context)]: the rebuilt event and its context.
    """
    token = event.content["third_party_invite"]["signed"]["token"]
    key = (
        EventTypes.ThirdPartyInvite,
        token
    )

    prev_state_ids = yield context.get_prev_state_ids(self.store)
    invite_event_id = prev_state_ids.get(key)

    invite_event = None
    if invite_event_id:
        invite_event = yield self.store.get_event(
            invite_event_id, allow_none=True
        )

    if invite_event:
        event_dict["content"]["third_party_invite"]["display_name"] = (
            invite_event.content["display_name"]
        )
    else:
        logger.info(
            "Could not find invite event for third_party_invite: %r",
            event_dict
        )
        # We don't discard here as this is not the appropriate place to do
        # auth checks. If we need the invite and don't have it then the
        # auth check code will explode appropriately.

    builder = self.event_builder_factory.new(event_dict)
    EventValidator().validate_new(builder)
    event, context = yield self.event_creation_handler.create_new_client_event(
        builder=builder,
    )
    defer.returnValue((event, context))
|
|
|
|
|
2015-11-05 09:43:19 -07:00
|
|
|
@defer.inlineCallbacks
def _check_signature(self, event, context):
    """
    Checks that the signature in the event is consistent with its invite.

    Args:
        event (Event): The m.room.member event to check
        context (EventContext):

    Raises:
        AuthError: if signature didn't match any keys, or key has been
            revoked, or if no signature could be checked at all,
        SynapseError: if a transient error meant a key couldn't be checked
            for revocation.
    """
    signed = event.content["third_party_invite"]["signed"]
    token = signed["token"]

    prev_state_ids = yield context.get_prev_state_ids(self.store)
    invite_event_id = prev_state_ids.get(
        (EventTypes.ThirdPartyInvite, token,)
    )

    invite_event = None
    if invite_event_id:
        invite_event = yield self.store.get_event(invite_event_id, allow_none=True)

    if not invite_event:
        raise AuthError(403, "Could not find invite")

    last_exception = None
    for public_key_object in self.hs.get_auth().get_public_keys(invite_event):
        try:
            for server, signature_block in signed["signatures"].items():
                for key_name, encoded_signature in signature_block.items():
                    if not key_name.startswith("ed25519:"):
                        continue

                    public_key = public_key_object["public_key"]
                    verify_key = decode_verify_key_bytes(
                        key_name,
                        decode_base64(public_key)
                    )
                    verify_signed_json(signed, server, verify_key)
                    if "key_validity_url" in public_key_object:
                        yield self._check_key_revocation(
                            public_key,
                            public_key_object["key_validity_url"]
                        )
                    # success on the first signature which verifies (and
                    # whose key, if it has a validity url, is not revoked)
                    return
        except Exception as e:
            last_exception = e

    if last_exception is None:
        # We get here if get_public_keys() yielded no candidate keys, or if
        # the "signed" object carried no ed25519 signatures: nothing was
        # ever attempted, so no exception was captured. Raise a proper auth
        # failure instead of `raise None` (which would be a TypeError).
        raise AuthError(403, "Could not verify signature of third party invite")
    raise last_exception
@defer.inlineCallbacks
def _check_key_revocation(self, public_key, url):
    """
    Checks whether public_key has been revoked.

    Args:
        public_key (str): base-64 encoded public key.
        url (str): Key revocation URL.

    Raises:
        AuthError: if they key has been revoked.
        SynapseError: if a transient error meant a key couldn't be checked
            for revocation.
    """
    http_client = self.hs.get_simple_http_client()
    try:
        response = yield http_client.get_json(
            url, {"public_key": public_key}
        )
    except Exception:
        # Map any fetch/parse failure to a 502: we couldn't reach or
        # understand the revocation endpoint.
        raise SynapseError(
            502,
            "Third party certificate could not be checked"
        )

    # The endpoint must affirmatively report the key as valid.
    if "valid" not in response or not response["valid"]:
        raise AuthError(403, "Third party certificate was invalid")
@defer.inlineCallbacks
def _persist_events(self, event_and_contexts, backfilled=False):
    """Persists events and tells the notifier/pushers about them, if
    necessary.

    Args:
        event_and_contexts(list[tuple[FrozenEvent, EventContext]])
        backfilled (bool): Whether these events are a result of
            backfilling or not

    Returns:
        Deferred
    """
    max_stream_id = yield self.store.persist_events(
        event_and_contexts,
        backfilled=backfilled,
    )

    # Never notify for backfilled events
    if backfilled:
        return

    for event, _ in event_and_contexts:
        self._notify_persisted_event(event, max_stream_id)
def _notify_persisted_event(self, event, max_stream_id):
    """Checks to see if notifier/pushers should be notified about the
    event or not.

    Args:
        event (FrozenEvent)
        max_stream_id (int): The max_stream_id returned by persist_events
    """
    extra_users = []
    if event.type == EventTypes.Member:
        target_user_id = event.state_key

        # We notify for memberships if its an invite for one of our
        # users
        if (
            event.internal_metadata.is_outlier()
            and event.membership != Membership.INVITE
            and not self.is_mine_id(target_user_id)
        ):
            return

        extra_users.append(UserID.from_string(target_user_id))
    elif event.internal_metadata.is_outlier():
        # Non-membership outliers never generate notifications.
        return

    event_stream_id = event.internal_metadata.stream_ordering
    self.notifier.on_new_room_event(
        event, event_stream_id, max_stream_id,
        extra_users=extra_users,
    )

    # Kick off push processing without blocking on it.
    logcontext.run_in_background(
        self.pusher_pool.on_new_notifications,
        event_stream_id, max_stream_id,
    )
def _clean_room_for_join(self, room_id):
    """Delegates to the datastore's ``clean_room_for_join``.

    Args:
        room_id (str): the room to clean up

    Returns:
        Deferred
    """
    # NOTE(review): presumably invoked before (re-)joining a room over
    # federation to drop stale local state -- confirm against callers.
    return self.store.clean_room_for_join(room_id)
def user_joined_room(self, user, room_id):
    """Called when a new user has joined the room
    """
    # Delegate to the module-level ``user_joined_room`` helper, handing it
    # this handler's distributor.
    distributor = self.distributor
    return user_joined_room(distributor, user, room_id)