2015-01-26 03:45:24 -07:00
|
|
|
# -*- coding: utf-8 -*-
|
2016-01-06 21:26:29 -07:00
|
|
|
# Copyright 2015, 2016 OpenMarket Ltd
|
2015-01-26 03:45:24 -07:00
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
|
|
|
|
|
|
|
|
2017-12-30 11:40:19 -07:00
|
|
|
import copy
|
|
|
|
import itertools
|
|
|
|
import logging
|
2020-02-03 13:59:10 -07:00
|
|
|
from typing import (
|
|
|
|
Any,
|
|
|
|
Awaitable,
|
|
|
|
Callable,
|
|
|
|
Dict,
|
|
|
|
Iterable,
|
|
|
|
List,
|
|
|
|
Optional,
|
2020-02-03 14:14:30 -07:00
|
|
|
Sequence,
|
2020-02-03 13:59:10 -07:00
|
|
|
Tuple,
|
|
|
|
TypeVar,
|
|
|
|
)
|
2017-12-30 11:40:19 -07:00
|
|
|
|
2018-07-09 00:09:20 -06:00
|
|
|
from prometheus_client import Counter
|
|
|
|
|
2015-01-26 03:45:24 -07:00
|
|
|
from twisted.internet import defer
|
|
|
|
|
2019-04-01 03:24:38 -06:00
|
|
|
from synapse.api.constants import EventTypes, Membership
|
2015-03-05 09:08:02 -07:00
|
|
|
from synapse.api.errors import (
|
2018-07-09 00:09:20 -06:00
|
|
|
CodeMessageException,
|
2019-02-23 07:31:08 -07:00
|
|
|
Codes,
|
2018-07-09 00:09:20 -06:00
|
|
|
FederationDeniedError,
|
|
|
|
HttpResponseException,
|
|
|
|
SynapseError,
|
2020-01-27 07:30:57 -07:00
|
|
|
UnsupportedRoomVersionError,
|
2015-03-05 09:08:02 -07:00
|
|
|
)
|
2019-04-01 03:24:38 -06:00
|
|
|
from synapse.api.room_versions import (
|
|
|
|
KNOWN_ROOM_VERSIONS,
|
|
|
|
EventFormatVersions,
|
2020-02-03 13:51:26 -07:00
|
|
|
RoomVersion,
|
2019-04-01 03:24:38 -06:00
|
|
|
RoomVersions,
|
|
|
|
)
|
2020-01-31 09:50:13 -07:00
|
|
|
from synapse.events import EventBase, builder
|
2018-07-09 00:09:20 -06:00
|
|
|
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
|
2019-12-10 10:42:46 -07:00
|
|
|
from synapse.logging.context import make_deferred_yieldable
|
2019-07-03 08:07:04 -06:00
|
|
|
from synapse.logging.utils import log_function
|
2020-02-05 08:49:42 -07:00
|
|
|
from synapse.types import JsonDict
|
2019-12-10 10:42:46 -07:00
|
|
|
from synapse.util import unwrapFirstError
|
2017-12-30 11:40:19 -07:00
|
|
|
from synapse.util.caches.expiringcache import ExpiringCache
|
2017-03-22 18:12:21 -06:00
|
|
|
from synapse.util.retryutils import NotRetryingDestination
|
2015-02-17 10:20:56 -07:00
|
|
|
|
2018-05-21 18:47:37 -06:00
|
|
|
logger = logging.getLogger(__name__)

# Prometheus counter for outbound federation queries, labelled by the query
# type (e.g. "client_device_keys", "user_devices").
sent_queries_counter = Counter("synapse_federation_client_sent_queries", "", ["type"])

# How long (in milliseconds) we remember that a destination failed to return
# a PDU before we are willing to ask it again; see get_pdu and
# _clear_tried_cache below.
PDU_RETRY_TIME_MS = 1 * 60 * 1000

# Return type of the per-destination callback passed to
# _try_destination_list.
T = TypeVar("T")
|
|
|
|
|
2016-08-10 04:31:46 -06:00
|
|
|
|
2018-08-01 04:24:19 -06:00
|
|
|
class InvalidResponseError(RuntimeError):
    """Raised by callbacks used with _try_destination_list to indicate that
    the remote server returned a response we couldn't parse.
    """
|
|
|
|
|
|
|
|
|
2015-02-03 07:58:30 -07:00
|
|
|
class FederationClient(FederationBase):
|
2016-06-15 08:12:59 -06:00
|
|
|
def __init__(self, hs):
|
|
|
|
super(FederationClient, self).__init__(hs)
|
2015-02-16 11:02:39 -07:00
|
|
|
|
2016-08-10 04:31:46 -06:00
|
|
|
self.pdu_destination_tried = {}
|
2019-06-20 03:32:02 -06:00
|
|
|
self._clock.looping_call(self._clear_tried_cache, 60 * 1000)
|
2016-08-26 07:54:30 -06:00
|
|
|
self.state = hs.get_state_handler()
|
2018-03-12 08:07:39 -06:00
|
|
|
self.transport_layer = hs.get_federation_transport_client()
|
2016-08-10 04:31:46 -06:00
|
|
|
|
2019-01-25 10:19:31 -07:00
|
|
|
self.hostname = hs.hostname
|
|
|
|
self.signing_key = hs.config.signing_key[0]
|
2019-01-16 08:13:07 -07:00
|
|
|
|
2018-09-21 07:19:46 -06:00
|
|
|
self._get_pdu_cache = ExpiringCache(
|
|
|
|
cache_name="get_pdu_cache",
|
|
|
|
clock=self._clock,
|
|
|
|
max_len=1000,
|
|
|
|
expiry_ms=120 * 1000,
|
|
|
|
reset_expiry_on_get=False,
|
|
|
|
)
|
|
|
|
|
2016-08-10 04:31:46 -06:00
|
|
|
def _clear_tried_cache(self):
|
|
|
|
"""Clear pdu_destination_tried cache"""
|
|
|
|
now = self._clock.time_msec()
|
|
|
|
|
|
|
|
old_dict = self.pdu_destination_tried
|
|
|
|
self.pdu_destination_tried = {}
|
|
|
|
|
|
|
|
for event_id, destination_dict in old_dict.items():
|
|
|
|
destination_dict = {
|
|
|
|
dest: time
|
|
|
|
for dest, time in destination_dict.items()
|
|
|
|
if time + PDU_RETRY_TIME_MS > now
|
|
|
|
}
|
|
|
|
if destination_dict:
|
|
|
|
self.pdu_destination_tried[event_id] = destination_dict
|
|
|
|
|
2015-01-26 03:45:24 -07:00
|
|
|
@log_function
|
2019-06-20 03:32:02 -06:00
|
|
|
def make_query(
|
|
|
|
self,
|
|
|
|
destination,
|
|
|
|
query_type,
|
|
|
|
args,
|
|
|
|
retry_on_dns_fail=False,
|
|
|
|
ignore_backoff=False,
|
|
|
|
):
|
2015-01-26 03:45:24 -07:00
|
|
|
"""Sends a federation Query to a remote homeserver of the given type
|
|
|
|
and arguments.
|
|
|
|
|
|
|
|
Args:
|
|
|
|
destination (str): Domain name of the remote homeserver
|
|
|
|
query_type (str): Category of the query type; should match the
|
|
|
|
handler name used in register_query_handler().
|
|
|
|
args (dict): Mapping of strings to strings containing the details
|
|
|
|
of the query request.
|
2017-03-23 05:10:36 -06:00
|
|
|
ignore_backoff (bool): true to ignore the historical backoff data
|
|
|
|
and try the request anyway.
|
2015-01-26 03:45:24 -07:00
|
|
|
|
|
|
|
Returns:
|
|
|
|
a Deferred which will eventually yield a JSON object from the
|
|
|
|
response
|
|
|
|
"""
|
2018-05-21 18:47:37 -06:00
|
|
|
sent_queries_counter.labels(query_type).inc()
|
2015-03-10 09:29:22 -06:00
|
|
|
|
2015-01-26 03:45:24 -07:00
|
|
|
return self.transport_layer.make_query(
|
2019-06-20 03:32:02 -06:00
|
|
|
destination,
|
|
|
|
query_type,
|
|
|
|
args,
|
|
|
|
retry_on_dns_fail=retry_on_dns_fail,
|
2017-03-23 05:10:36 -06:00
|
|
|
ignore_backoff=ignore_backoff,
|
2015-01-26 03:45:24 -07:00
|
|
|
)
|
|
|
|
|
2015-07-23 09:03:38 -06:00
|
|
|
@log_function
|
2016-09-12 11:17:09 -06:00
|
|
|
def query_client_keys(self, destination, content, timeout):
|
2015-07-23 09:03:38 -06:00
|
|
|
"""Query device keys for a device hosted on a remote server.
|
|
|
|
|
|
|
|
Args:
|
|
|
|
destination (str): Domain name of the remote homeserver
|
|
|
|
content (dict): The query content.
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
a Deferred which will eventually yield a JSON object from the
|
|
|
|
response
|
|
|
|
"""
|
2018-05-21 18:47:37 -06:00
|
|
|
sent_queries_counter.labels("client_device_keys").inc()
|
2019-06-20 03:32:02 -06:00
|
|
|
return self.transport_layer.query_client_keys(destination, content, timeout)
|
2015-07-23 09:03:38 -06:00
|
|
|
|
2017-01-26 09:06:54 -07:00
|
|
|
@log_function
|
|
|
|
def query_user_devices(self, destination, user_id, timeout=30000):
|
|
|
|
"""Query the device keys for a list of user ids hosted on a remote
|
|
|
|
server.
|
|
|
|
"""
|
2018-05-21 18:47:37 -06:00
|
|
|
sent_queries_counter.labels("user_devices").inc()
|
2019-06-20 03:32:02 -06:00
|
|
|
return self.transport_layer.query_user_devices(destination, user_id, timeout)
|
2017-01-26 09:06:54 -07:00
|
|
|
|
2015-07-23 09:03:38 -06:00
|
|
|
@log_function
|
2016-09-12 11:17:09 -06:00
|
|
|
def claim_client_keys(self, destination, content, timeout):
|
2015-07-23 09:03:38 -06:00
|
|
|
"""Claims one-time keys for a device hosted on a remote server.
|
|
|
|
|
|
|
|
Args:
|
|
|
|
destination (str): Domain name of the remote homeserver
|
|
|
|
content (dict): The query content.
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
a Deferred which will eventually yield a JSON object from the
|
|
|
|
response
|
|
|
|
"""
|
2018-05-21 18:47:37 -06:00
|
|
|
sent_queries_counter.labels("client_one_time_keys").inc()
|
2019-06-20 03:32:02 -06:00
|
|
|
return self.transport_layer.claim_client_keys(destination, content, timeout)
|
2015-07-23 09:03:38 -06:00
|
|
|
|
2020-02-03 13:35:40 -07:00
|
|
|
    async def backfill(
        self, dest: str, room_id: str, limit: int, extremities: Iterable[str]
    ) -> Optional[List[EventBase]]:
        """Requests some more historic PDUs for the given room from the
        given destination server.

        Args:
            dest (str): The remote homeserver to ask.
            room_id (str): The room_id to backfill.
            limit (int): The maximum number of events to return.
            extremities (list): our current backwards extremities, to backfill from

        Returns:
            the (signature-checked) events received from the remote, or None
            if there were no extremities to backfill from.
        """
        logger.debug("backfill extrem=%s", extremities)

        # If there are no extremities then we've (probably) reached the start.
        if not extremities:
            return None

        transaction_data = await self.transport_layer.backfill(
            dest, room_id, extremities, limit
        )

        logger.debug("backfill transaction_data=%r", transaction_data)

        room_version = await self.store.get_room_version(room_id)

        # Parse each raw PDU dict in the response into an event of the room's
        # version. Not outliers: these join onto our existing graph.
        pdus = [
            event_from_pdu_json(p, room_version, outlier=False)
            for p in transaction_data["pdus"]
        ]

        # Check signatures/hashes for all events concurrently, replacing the
        # list contents in place with the checked events.
        # FIXME: We should handle signature failures more gracefully.
        pdus[:] = await make_deferred_yieldable(
            defer.gatherResults(
                self._check_sigs_and_hashes(room_version.identifier, pdus),
                consumeErrors=True,
            ).addErrback(unwrapFirstError)
        )

        return pdus
|
2015-01-26 03:45:24 -07:00
|
|
|
|
2020-02-03 13:41:54 -07:00
|
|
|
    async def get_pdu(
        self,
        destinations: Iterable[str],
        event_id: str,
        room_version: RoomVersion,
        outlier: bool = False,
        timeout: Optional[int] = None,
    ) -> Optional[EventBase]:
        """Requests the PDU with given origin and ID from the remote home
        servers.

        Will attempt to get the PDU from each destination in the list until
        one succeeds.

        Args:
            destinations: Which homeservers to query
            event_id: event to fetch
            room_version: version of the room
            outlier: Indicates whether the PDU is an `outlier`, i.e. if
                it's from an arbitary point in the context as opposed to part
                of the current block of PDUs. Defaults to `False`
            timeout: How long to try (in ms) each destination for before
                moving to the next destination. None indicates no timeout.

        Returns:
            The requested PDU, or None if we were unable to find it.
        """

        # TODO: Rate limit the number of times we try and get the same event.

        # Serve from the short-lived cache if we fetched this event recently.
        ev = self._get_pdu_cache.get(event_id)
        if ev:
            return ev

        # Per-destination record of when we last tried (and failed) to fetch
        # this event, so we don't re-ask a server within PDU_RETRY_TIME_MS.
        pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {})

        signed_pdu = None
        for destination in destinations:
            now = self._clock.time_msec()
            last_attempt = pdu_attempts.get(destination, 0)
            # Skip destinations still inside their retry window.
            if last_attempt + PDU_RETRY_TIME_MS > now:
                continue

            try:
                transaction_data = await self.transport_layer.get_event(
                    destination, event_id, timeout=timeout
                )

                logger.debug(
                    "retrieved event id %s from %s: %r",
                    event_id,
                    destination,
                    transaction_data,
                )

                pdu_list = [
                    event_from_pdu_json(p, room_version, outlier=outlier)
                    for p in transaction_data["pdus"]
                ]  # type: List[EventBase]

                if pdu_list and pdu_list[0]:
                    # We only asked for one event; take the first.
                    pdu = pdu_list[0]

                    # Check signatures are correct.
                    signed_pdu = await self._check_sigs_and_hash(
                        room_version.identifier, pdu
                    )

                    break

                # Empty response: remember the failed attempt for backoff.
                pdu_attempts[destination] = now

            except SynapseError as e:
                logger.info(
                    "Failed to get PDU %s from %s because %s", event_id, destination, e
                )
                continue
            except NotRetryingDestination as e:
                # The destination is in its general retry-backoff period.
                logger.info(str(e))
                continue
            except FederationDeniedError as e:
                # Federation with this destination is blocked by config.
                logger.info(str(e))
                continue
            except Exception as e:
                # Unexpected failure: record the attempt so we back off from
                # this destination for a while.
                pdu_attempts[destination] = now

                logger.info(
                    "Failed to get PDU %s from %s because %s", event_id, destination, e
                )
                continue

        # Cache the verified event (if any) for subsequent lookups.
        if signed_pdu:
            self._get_pdu_cache[event_id] = signed_pdu

        return signed_pdu
|
2015-01-26 03:45:24 -07:00
|
|
|
|
2020-02-03 13:42:52 -07:00
|
|
|
async def get_room_state_ids(
|
|
|
|
self, destination: str, room_id: str, event_id: str
|
|
|
|
) -> Tuple[List[str], List[str]]:
|
2019-12-10 10:42:46 -07:00
|
|
|
"""Calls the /state_ids endpoint to fetch the state at a particular point
|
|
|
|
in the room, and the auth events for the given event
|
2015-01-26 03:45:24 -07:00
|
|
|
|
|
|
|
Returns:
|
2020-02-03 13:42:52 -07:00
|
|
|
a tuple of (state event_ids, auth event_ids)
|
2015-01-26 03:45:24 -07:00
|
|
|
"""
|
2020-02-03 13:42:52 -07:00
|
|
|
result = await self.transport_layer.get_room_state_ids(
|
2019-06-20 03:32:02 -06:00
|
|
|
destination, room_id, event_id=event_id
|
2016-08-03 08:04:29 -06:00
|
|
|
)
|
|
|
|
|
2019-12-09 04:37:26 -07:00
|
|
|
state_event_ids = result["pdu_ids"]
|
|
|
|
auth_event_ids = result.get("auth_chain_ids", [])
|
2015-01-26 03:45:24 -07:00
|
|
|
|
2019-12-10 10:42:46 -07:00
|
|
|
if not isinstance(state_event_ids, list) or not isinstance(
|
|
|
|
auth_event_ids, list
|
|
|
|
):
|
|
|
|
raise Exception("invalid response from /state_ids")
|
2016-08-03 07:47:37 -06:00
|
|
|
|
2019-12-10 10:42:46 -07:00
|
|
|
return state_event_ids, auth_event_ids
|
2016-08-03 07:47:37 -06:00
|
|
|
|
2020-02-03 13:43:40 -07:00
|
|
|
async def get_event_auth(self, destination, room_id, event_id):
|
|
|
|
res = await self.transport_layer.get_event_auth(destination, room_id, event_id)
|
2015-01-26 03:45:24 -07:00
|
|
|
|
2020-01-31 09:50:13 -07:00
|
|
|
room_version = await self.store.get_room_version(room_id)
|
2019-01-23 13:21:33 -07:00
|
|
|
|
2015-01-26 03:45:24 -07:00
|
|
|
auth_chain = [
|
2020-01-31 09:50:13 -07:00
|
|
|
event_from_pdu_json(p, room_version, outlier=True)
|
|
|
|
for p in res["auth_chain"]
|
2015-01-26 03:45:24 -07:00
|
|
|
]
|
|
|
|
|
2020-02-03 13:43:40 -07:00
|
|
|
signed_auth = await self._check_sigs_and_hash_and_fetch(
|
2020-01-31 09:50:13 -07:00
|
|
|
destination, auth_chain, outlier=True, room_version=room_version.identifier
|
2015-02-02 09:56:01 -07:00
|
|
|
)
|
2015-01-26 07:33:11 -07:00
|
|
|
|
2015-02-02 09:56:01 -07:00
|
|
|
signed_auth.sort(key=lambda e: e.depth)
|
2015-01-26 03:45:24 -07:00
|
|
|
|
2019-07-23 07:00:55 -06:00
|
|
|
return signed_auth
|
2015-01-26 03:45:24 -07:00
|
|
|
|
2020-02-03 13:59:10 -07:00
|
|
|
    async def _try_destination_list(
        self,
        description: str,
        destinations: Iterable[str],
        callback: Callable[[str], Awaitable[T]],
    ) -> T:
        """Try an operation on a series of servers, until it succeeds

        Args:
            description: description of the operation we're doing, for logging

            destinations: list of server_names to try

            callback: Function to run for each server. Passed a single
                argument: the server_name to try.

            If the callback raises a CodeMessageException with a 300/400 code,
            attempts to perform the operation stop immediately and the exception is
            reraised.

            Otherwise, if the callback raises an Exception the error is logged and the
            next server tried. Normally the stacktrace is logged but this is
            suppressed if the exception is an InvalidResponseError.

        Returns:
            The result of callback, if it succeeds

        Raises:
            SynapseError if the chosen remote server returns a 300/400 code, or
            no servers were reachable.
        """
        for destination in destinations:
            # Never federate with ourselves.
            if destination == self.server_name:
                continue

            try:
                res = await callback(destination)
                return res
            except InvalidResponseError as e:
                # Unparseable response: log without a stack trace and move on.
                logger.warning("Failed to %s via %s: %s", description, destination, e)
            except UnsupportedRoomVersionError:
                # No other server will give us a different room version, so
                # there is no point trying the rest of the list.
                raise
            except HttpResponseException as e:
                if not 500 <= e.code < 600:
                    # A non-5xx error means our request itself was bad;
                    # retrying elsewhere won't help, so abort.
                    raise e.to_synapse_error()
                else:
                    # A 5xx is a problem with that server; try the next one.
                    logger.warning(
                        "Failed to %s via %s: %i %s",
                        description,
                        destination,
                        e.code,
                        e.args[0],
                    )
            except Exception:
                # Anything else (e.g. connection failure): log with stack
                # trace and try the next destination.
                logger.warning(
                    "Failed to %s via %s", description, destination, exc_info=True
                )

        raise SynapseError(502, "Failed to %s via any server" % (description,))
|
2018-08-01 04:24:19 -06:00
|
|
|
|
2020-02-03 13:51:26 -07:00
|
|
|
    async def make_membership_event(
        self,
        destinations: Iterable[str],
        room_id: str,
        user_id: str,
        membership: str,
        content: dict,
        params: Dict[str, str],
    ) -> Tuple[str, EventBase, RoomVersion]:
        """
        Creates an m.room.member event, with context, without participating in the room.

        Does so by asking one of the already participating servers to create an
        event with proper context.

        Returns a fully signed and hashed event.

        Note that this does not append any events to any graphs.

        Args:
            destinations: Candidate homeservers which are probably
                participating in the room.
            room_id: The room in which the event will happen.
            user_id: The user whose membership is being evented.
            membership: The "membership" property of the event. Must be one of
                "join" or "leave".
            content: Any additional data to put into the content field of the
                event.
            params: Query parameters to include in the request.

        Returns:
            `(origin, event, room_version)` where origin is the remote
            homeserver which generated the event, and room_version is the
            version of the room.

        Raises:
            UnsupportedRoomVersionError: if remote responds with
                a room version we don't understand.

            SynapseError: if the chosen remote server returns a 300/400 code.

            RuntimeError: if no servers were reachable.
        """
        valid_memberships = {Membership.JOIN, Membership.LEAVE}
        if membership not in valid_memberships:
            raise RuntimeError(
                "make_membership_event called with membership='%s', must be one of %s"
                % (membership, ",".join(valid_memberships))
            )

        async def send_request(destination: str) -> Tuple[str, EventBase, RoomVersion]:
            # Ask this candidate server to build the membership event for us.
            ret = await self.transport_layer.make_membership_event(
                destination, room_id, user_id, membership, params
            )

            # Note: If not supplied, the room version may be either v1 or v2,
            # however either way the event format version will be v1.
            room_version_id = ret.get("room_version", RoomVersions.V1.identifier)
            room_version = KNOWN_ROOM_VERSIONS.get(room_version_id)
            if not room_version:
                raise UnsupportedRoomVersionError()

            pdu_dict = ret.get("event", None)
            if not isinstance(pdu_dict, dict):
                raise InvalidResponseError("Bad 'event' field in response")

            logger.debug("Got response to make_%s: %s", membership, pdu_dict)

            # Merge the caller-supplied content into the remote's proto-event.
            pdu_dict["content"].update(content)

            # The protoevent received over the JSON wire may not have all
            # the required fields. Lets just gloss over that because
            # there's some we never care about
            if "prev_state" not in pdu_dict:
                pdu_dict["prev_state"] = []

            # Build and sign the event locally, using our own hostname and
            # signing key.
            ev = builder.create_local_event_from_event_dict(
                self._clock,
                self.hostname,
                self.signing_key,
                room_version=room_version,
                event_dict=pdu_dict,
            )

            return destination, ev, room_version

        return await self._try_destination_list(
            "make_" + membership, destinations, send_request
        )
|
2015-01-26 03:45:24 -07:00
|
|
|
|
2020-02-03 13:55:00 -07:00
|
|
|
    async def send_join(
        self, destinations: Iterable[str], pdu: EventBase, room_version: RoomVersion
    ) -> Dict[str, Any]:
        """Sends a join event to one of a list of homeservers.

        Doing so will cause the remote server to add the event to the graph,
        and send the event out to the rest of the federation.

        Args:
            destinations: Candidate homeservers which are probably
                participating in the room.
            pdu: event to be sent
            room_version: the version of the room (according to the server that
                did the make_join)

        Returns:
            a dict with members ``origin`` (a string
            giving the server the event was sent to, ``state`` (?) and
            ``auth_chain``.

        Raises:
            SynapseError: if the chosen remote server returns a 300/400 code.

            RuntimeError: if no servers were reachable.
        """

        async def send_request(destination) -> Dict[str, Any]:
            content = await self._do_send_join(destination, pdu)

            logger.debug("Got content: %s", content)

            # Parse the state and auth chain returned by the remote, all as
            # outliers (they're not attached to our local graph yet).
            state = [
                event_from_pdu_json(p, room_version, outlier=True)
                for p in content.get("state", [])
            ]

            auth_chain = [
                event_from_pdu_json(p, room_version, outlier=True)
                for p in content.get("auth_chain", [])
            ]

            # De-duplicate by event_id across both lists before verification.
            pdus = {p.event_id: p for p in itertools.chain(state, auth_chain)}

            create_event = None
            for e in state:
                if (e.type, e.state_key) == (EventTypes.Create, ""):
                    create_event = e
                    break

            if create_event is None:
                # If the state doesn't have a create event then the room is
                # invalid, and it would fail auth checks anyway.
                raise SynapseError(400, "No create event in state")

            # the room version should be sane.
            create_room_version = create_event.content.get(
                "room_version", RoomVersions.V1.identifier
            )
            if create_room_version != room_version.identifier:
                # either the server that fulfilled the make_join, or the server that is
                # handling the send_join, is lying.
                raise InvalidResponseError(
                    "Unexpected room version %s in create event"
                    % (create_room_version,)
                )

            # Verify signatures/hashes for everything; events that fail are
            # dropped from the result.
            valid_pdus = await self._check_sigs_and_hash_and_fetch(
                destination,
                list(pdus.values()),
                outlier=True,
                room_version=room_version.identifier,
            )

            valid_pdus_map = {p.event_id: p for p in valid_pdus}

            # NB: We *need* to copy to ensure that we don't have multiple
            # references being passed on, as that causes... issues.
            signed_state = [
                copy.copy(valid_pdus_map[p.event_id])
                for p in state
                if p.event_id in valid_pdus_map
            ]

            signed_auth = [
                valid_pdus_map[p.event_id]
                for p in auth_chain
                if p.event_id in valid_pdus_map
            ]

            # NB: We *need* to copy to ensure that we don't have multiple
            # references being passed on, as that causes... issues.
            for s in signed_state:
                s.internal_metadata = copy.deepcopy(s.internal_metadata)

            # double-check that the same create event has ended up in the auth chain
            auth_chain_create_events = [
                e.event_id
                for e in signed_auth
                if (e.type, e.state_key) == (EventTypes.Create, "")
            ]
            if auth_chain_create_events != [create_event.event_id]:
                raise InvalidResponseError(
                    "Unexpected create event(s) in auth chain: %s"
                    % (auth_chain_create_events,)
                )

            return {
                "state": signed_state,
                "auth_chain": signed_auth,
                "origin": destination,
            }

        return await self._try_destination_list("send_join", destinations, send_request)
|
2015-01-26 03:45:24 -07:00
|
|
|
|
2020-02-03 14:08:24 -07:00
|
|
|
    async def _do_send_join(self, destination: str, pdu: EventBase):
        """Send a join event to a single remote server, preferring the v2
        send_join API and falling back to v1 if the remote doesn't support it.

        Returns:
            the remote's response content (a dict).
        """
        time_now = self._clock.time_msec()

        try:
            content = await self.transport_layer.send_join_v2(
                destination=destination,
                room_id=pdu.room_id,
                event_id=pdu.event_id,
                content=pdu.get_pdu_json(time_now),
            )

            return content
        except HttpResponseException as e:
            if e.code in [400, 404]:
                err = e.to_synapse_error()

                # If we receive an error response that isn't a generic error, or an
                # unrecognised endpoint error, we assume that the remote understands
                # the v2 invite API and this is a legitimate error.
                if err.errcode not in [Codes.UNKNOWN, Codes.UNRECOGNIZED]:
                    raise err
            else:
                # Any other HTTP error is a real failure, not a "v2 not
                # supported" signal.
                raise e.to_synapse_error()

        # Reaching here means the v2 call failed with a generic/unrecognised
        # 400/404, which we take to mean the remote doesn't speak v2.
        logger.debug("Couldn't send_join with the v2 API, falling back to the v1 API")

        resp = await self.transport_layer.send_join_v1(
            destination=destination,
            room_id=pdu.room_id,
            event_id=pdu.event_id,
            content=pdu.get_pdu_json(time_now),
        )

        # We expect the v1 API to respond with [200, content], so we only return the
        # content.
        return resp[1]
|
|
|
|
|
2020-02-05 08:47:00 -07:00
|
|
|
    async def send_invite(
        self, destination: str, room_id: str, event_id: str, pdu: EventBase,
    ) -> EventBase:
        """Sends an invite event to a remote homeserver.

        Args:
            destination: The remote homeserver to send the invite to.
            room_id: The room the invite is for.
            event_id: The ID of the invite event.
            pdu: The invite event itself.

        Returns:
            The invite event as returned (and signed) by the remote server,
            after its signatures and hashes have been checked.
        """
        room_version = await self.store.get_room_version(room_id)

        content = await self._do_send_invite(destination, pdu, room_version)

        pdu_dict = content["event"]

        logger.debug("Got response to send_invite: %s", pdu_dict)

        pdu = event_from_pdu_json(pdu_dict, room_version)

        # Check signatures are correct.
        pdu = await self._check_sigs_and_hash(room_version.identifier, pdu)

        # FIXME: We should handle signature failures more gracefully.

        return pdu
|
2015-01-26 03:45:24 -07:00
|
|
|
|
2020-02-05 08:49:42 -07:00
|
|
|
    async def _do_send_invite(
        self, destination: str, pdu: EventBase, room_version: RoomVersion
    ) -> JsonDict:
        """Actually sends the invite, first trying v2 API and falling back to
        v1 API if necessary.

        Args:
            destination: The remote homeserver to send the invite to.
            pdu: The invite event.
            room_version: The version of the room the invite is for.

        Returns:
            The event as a dict as returned by the remote server

        Raises:
            SynapseError: if the remote returned a legitimate error, or if it
                only supports the v1 API while the room uses new-style event
                IDs (which the v1 API cannot carry).
        """
        time_now = self._clock.time_msec()

        try:
            content = await self.transport_layer.send_invite_v2(
                destination=destination,
                room_id=pdu.room_id,
                event_id=pdu.event_id,
                content={
                    "event": pdu.get_pdu_json(time_now),
                    "room_version": room_version.identifier,
                    "invite_room_state": pdu.unsigned.get("invite_room_state", []),
                },
            )
            return content
        except HttpResponseException as e:
            if e.code in [400, 404]:
                err = e.to_synapse_error()

                # If we receive an error response that isn't a generic error, we
                # assume that the remote understands the v2 invite API and this
                # is a legitimate error.
                if err.errcode != Codes.UNKNOWN:
                    raise err

                # Otherwise, we assume that the remote server doesn't understand
                # the v2 invite API. That's ok provided the room uses old-style event
                # IDs.
                if room_version.event_format != EventFormatVersions.V1:
                    raise SynapseError(
                        400,
                        "User's homeserver does not support this room version",
                        Codes.UNSUPPORTED_ROOM_VERSION,
                    )
            elif e.code == 403:
                raise e.to_synapse_error()
            else:
                raise

        # Didn't work, try v1 API.
        # Note the v1 API returns a tuple of `(200, content)`

        _, content = await self.transport_layer.send_invite_v1(
            destination=destination,
            room_id=pdu.room_id,
            event_id=pdu.event_id,
            content=pdu.get_pdu_json(time_now),
        )
        return content
|
2019-01-28 07:55:53 -07:00
|
|
|
|
2020-02-03 13:55:11 -07:00
|
|
|
async def send_leave(self, destinations: Iterable[str], pdu: EventBase) -> None:
|
2017-04-20 17:46:54 -06:00
|
|
|
"""Sends a leave event to one of a list of homeservers.
|
|
|
|
|
|
|
|
Doing so will cause the remote server to add the event to the graph,
|
|
|
|
and send the event out to the rest of the federation.
|
|
|
|
|
|
|
|
This is mostly useful to reject received invites.
|
|
|
|
|
|
|
|
Args:
|
2020-02-03 13:55:11 -07:00
|
|
|
destinations: Candidate homeservers which are probably
|
2017-04-20 17:46:54 -06:00
|
|
|
participating in the room.
|
2020-02-03 13:55:11 -07:00
|
|
|
pdu: event to be sent
|
2017-04-20 17:46:54 -06:00
|
|
|
|
2020-02-03 13:55:11 -07:00
|
|
|
Raises:
|
|
|
|
SynapseError if the chosen remote server returns a 300/400 code.
|
2017-04-20 17:46:54 -06:00
|
|
|
|
2020-02-03 13:55:11 -07:00
|
|
|
RuntimeError if no servers were reachable.
|
2017-04-20 17:46:54 -06:00
|
|
|
"""
|
2019-06-20 03:32:02 -06:00
|
|
|
|
2020-02-03 14:08:51 -07:00
|
|
|
async def send_request(destination: str) -> None:
|
|
|
|
content = await self._do_send_leave(destination, pdu)
|
2019-11-11 09:26:53 -07:00
|
|
|
logger.debug("Got content: %s", content)
|
|
|
|
|
2020-02-03 13:55:11 -07:00
|
|
|
return await self._try_destination_list(
|
|
|
|
"send_leave", destinations, send_request
|
|
|
|
)
|
2019-11-11 09:26:53 -07:00
|
|
|
|
2020-02-03 14:09:07 -07:00
|
|
|
    async def _do_send_leave(self, destination: str, pdu: EventBase) -> JsonDict:
        """Attempt to send a leave event to a single remote homeserver.

        Tries the v2 `send_leave` federation API first, and falls back to the
        v1 API if the remote replies in a way that suggests it does not
        recognise the v2 endpoint.

        Args:
            destination: The remote homeserver to send the leave to.
            pdu: The leave membership event to send.

        Returns:
            The parsed JSON response from the remote server.

        Raises:
            SynapseError: if the remote returned a legitimate error (i.e. not
                an "unrecognised endpoint" style response).
        """
        time_now = self._clock.time_msec()

        try:
            content = await self.transport_layer.send_leave_v2(
                destination=destination,
                room_id=pdu.room_id,
                event_id=pdu.event_id,
                content=pdu.get_pdu_json(time_now),
            )

            return content
        except HttpResponseException as e:
            if e.code in [400, 404]:
                err = e.to_synapse_error()

                # If we receive an error response that isn't a generic error, or an
                # unrecognised endpoint error, we assume that the remote understands
                # the v2 invite API and this is a legitimate error.
                if err.errcode not in [Codes.UNKNOWN, Codes.UNRECOGNIZED]:
                    raise err
            else:
                raise e.to_synapse_error()

        logger.debug("Couldn't send_leave with the v2 API, falling back to the v1 API")

        resp = await self.transport_layer.send_leave_v1(
            destination=destination,
            room_id=pdu.room_id,
            event_id=pdu.event_id,
            content=pdu.get_pdu_json(time_now),
        )

        # We expect the v1 API to respond with [200, content], so we only return the
        # content.
        return resp[1]
|
2015-10-20 04:58:58 -06:00
|
|
|
|
2019-06-20 03:32:02 -06:00
|
|
|
    def get_public_rooms(
        self,
        destination,
        limit=None,
        since_token=None,
        search_filter=None,
        include_all_networks=False,
        third_party_instance_id=None,
    ):
        """Fetch the public room list from a remote homeserver.

        Thin wrapper around the transport layer's `get_public_rooms`.

        Args:
            destination (str): The remote server to query.
            limit: Maximum number of rooms to return (presumably an int) —
                passed straight through to the transport layer.
            since_token: Pagination token from a previous call, passed
                straight through.
            search_filter: Filter for the results, passed straight through.
            include_all_networks (bool): Forwarded as a keyword argument to
                the transport layer.
            third_party_instance_id: Forwarded as a keyword argument to the
                transport layer.

        Returns:
            Whatever `transport_layer.get_public_rooms` returns, or None if
            `destination` is this very server (we never federate with
            ourselves).
        """
        # Short-circuit: asking ourselves over federation makes no sense.
        if destination == self.server_name:
            return

        return self.transport_layer.get_public_rooms(
            destination,
            limit,
            since_token,
            search_filter,
            include_all_networks=include_all_networks,
            third_party_instance_id=third_party_instance_id,
        )
|
2016-05-31 10:20:07 -06:00
|
|
|
|
2020-02-03 14:14:30 -07:00
|
|
|
async def get_missing_events(
|
2019-06-20 03:32:02 -06:00
|
|
|
self,
|
2020-02-03 14:14:30 -07:00
|
|
|
destination: str,
|
|
|
|
room_id: str,
|
|
|
|
earliest_events_ids: Sequence[str],
|
|
|
|
latest_events: Iterable[EventBase],
|
|
|
|
limit: int,
|
|
|
|
min_depth: int,
|
|
|
|
timeout: int,
|
|
|
|
) -> List[EventBase]:
|
2015-03-05 09:31:13 -07:00
|
|
|
"""Tries to fetch events we are missing. This is called when we receive
|
|
|
|
an event without having received all of its ancestors.
|
|
|
|
|
|
|
|
Args:
|
2020-02-03 14:14:30 -07:00
|
|
|
destination
|
|
|
|
room_id
|
|
|
|
earliest_events_ids: List of event ids. Effectively the
|
2015-03-05 09:31:13 -07:00
|
|
|
events we expected to receive, but haven't. `get_missing_events`
|
|
|
|
should only return events that didn't happen before these.
|
2020-02-03 14:14:30 -07:00
|
|
|
latest_events: List of events we have received that we don't
|
2015-03-05 09:31:13 -07:00
|
|
|
have all previous events for.
|
2020-02-03 14:14:30 -07:00
|
|
|
limit: Maximum number of events to return.
|
|
|
|
min_depth: Minimum depth of events to return.
|
|
|
|
timeout: Max time to wait in ms
|
2015-03-05 09:31:13 -07:00
|
|
|
"""
|
2015-03-05 09:08:02 -07:00
|
|
|
try:
|
2020-02-03 14:14:30 -07:00
|
|
|
content = await self.transport_layer.get_missing_events(
|
2015-03-05 09:08:02 -07:00
|
|
|
destination=destination,
|
|
|
|
room_id=room_id,
|
|
|
|
earliest_events=earliest_events_ids,
|
|
|
|
latest_events=[e.event_id for e in latest_events],
|
|
|
|
limit=limit,
|
|
|
|
min_depth=min_depth,
|
2016-12-31 08:21:37 -07:00
|
|
|
timeout=timeout,
|
2015-03-05 09:08:02 -07:00
|
|
|
)
|
|
|
|
|
2020-01-31 09:50:13 -07:00
|
|
|
room_version = await self.store.get_room_version(room_id)
|
2019-01-23 13:21:33 -07:00
|
|
|
|
2015-03-05 09:08:02 -07:00
|
|
|
events = [
|
2020-01-31 09:50:13 -07:00
|
|
|
event_from_pdu_json(e, room_version) for e in content.get("events", [])
|
2015-03-05 09:08:02 -07:00
|
|
|
]
|
|
|
|
|
2020-02-03 14:14:30 -07:00
|
|
|
signed_events = await self._check_sigs_and_hash_and_fetch(
|
2020-01-31 09:50:13 -07:00
|
|
|
destination, events, outlier=False, room_version=room_version.identifier
|
2015-03-05 09:08:02 -07:00
|
|
|
)
|
|
|
|
except HttpResponseException as e:
|
|
|
|
if not e.code == 400:
|
|
|
|
raise
|
2015-02-23 06:58:02 -07:00
|
|
|
|
2015-03-05 09:31:13 -07:00
|
|
|
# We are probably hitting an old server that doesn't support
|
|
|
|
# get_missing_events
|
2015-03-05 09:08:02 -07:00
|
|
|
signed_events = []
|
|
|
|
|
2019-07-23 07:00:55 -06:00
|
|
|
return signed_events
|
2015-02-23 06:58:02 -07:00
|
|
|
|
2015-11-05 09:43:19 -07:00
|
|
|
    @defer.inlineCallbacks
    def forward_third_party_invite(self, destinations, room_id, event_dict):
        """Ask remote servers, one at a time, to exchange a third party invite,
        stopping at the first that succeeds.

        Args:
            destinations (Iterable[str]): Candidate homeservers to ask. Our own
                server name is skipped.
            room_id (str): The room the invite is for.
            event_dict (dict): The third party invite event to exchange.

        Returns:
            Deferred[None]: resolves once one destination has accepted the
            exchange.

        Raises:
            CodeMessageException: re-raised immediately from the transport
                layer (deliberate: a well-formed error response means further
                attempts are pointless).
            RuntimeError: if no server could be reached at all.
        """
        for destination in destinations:
            if destination == self.server_name:
                continue

            try:
                yield self.transport_layer.exchange_third_party_invite(
                    destination=destination, room_id=room_id, event_dict=event_dict
                )
                return None
            except CodeMessageException:
                raise
            except Exception as e:
                # Unexpected failure (e.g. connection error): log and try the
                # next destination.
                logger.exception(
                    "Failed to send_third_party_invite via %s: %s", destination, str(e)
                )

        raise RuntimeError("Failed to send to any server.")
|
2019-07-29 10:47:27 -06:00
|
|
|
|
|
|
|
@defer.inlineCallbacks
|
|
|
|
def get_room_complexity(self, destination, room_id):
|
|
|
|
"""
|
|
|
|
Fetch the complexity of a remote room from another server.
|
|
|
|
|
|
|
|
Args:
|
|
|
|
destination (str): The remote server
|
|
|
|
room_id (str): The room ID to ask about.
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
Deferred[dict] or Deferred[None]: Dict contains the complexity
|
|
|
|
metric versions, while None means we could not fetch the complexity.
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
complexity = yield self.transport_layer.get_room_complexity(
|
|
|
|
destination=destination, room_id=room_id
|
|
|
|
)
|
|
|
|
defer.returnValue(complexity)
|
|
|
|
except CodeMessageException as e:
|
|
|
|
# We didn't manage to get it -- probably a 404. We are okay if other
|
|
|
|
# servers don't give it to us.
|
|
|
|
logger.debug(
|
|
|
|
"Failed to fetch room complexity via %s for %s, got a %d",
|
|
|
|
destination,
|
|
|
|
room_id,
|
|
|
|
e.code,
|
|
|
|
)
|
|
|
|
except Exception:
|
|
|
|
logger.exception(
|
|
|
|
"Failed to fetch room complexity via %s for %s", destination, room_id
|
|
|
|
)
|
|
|
|
|
|
|
|
# If we don't manage to find it, return None. It's not an error if a
|
|
|
|
# server doesn't give it to us.
|
|
|
|
defer.returnValue(None)
|