From 76c80e3fdf96f93cface7d8251910bb085ce2a24 Mon Sep 17 00:00:00 2001 From: Nicholas Eckardt Date: Sun, 10 Jun 2018 18:15:15 -0700 Subject: [PATCH 001/138] The project `matrix-synapse-auto-deploy` does not seem to be maintained anymore. It has been over a year since any code has been commited. Some of the relevant links in the documentation are broken, but since no pull requests are being accepted, they won't get fixed. We should probably remove this from the README. --- README.rst | 4 ---- 1 file changed, 4 deletions(-) diff --git a/README.rst b/README.rst index 4fe54b0c90..68e31ba8cf 100644 --- a/README.rst +++ b/README.rst @@ -161,10 +161,6 @@ There is an offical synapse image available at https://hub.docker.com/r/matrixdo Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a Dockerfile to automate a synapse server in a single Docker image, at https://hub.docker.com/r/avhost/docker-matrix/tags/ -Also, Martin Giess has created an auto-deployment process with vagrant/ansible, -tested with VirtualBox/AWS/DigitalOcean - see https://github.com/EMnify/matrix-synapse-auto-deploy -for details. - Configuring synapse ------------------- From 5c261107c9b5bf82a51c27fd6cb85e2199e539e3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 9 Aug 2018 15:34:56 +0100 Subject: [PATCH 002/138] Remove unnecessary resolve_events_with_state_map We only ever used the synchronous resolve_events_with_state_map in one place, which is trivial to replace with the async version. --- synapse/handlers/federation.py | 2 +- synapse/state/__init__.py | 34 ++++++---------------------------- synapse/state/v1.py | 28 ---------------------------- 3 files changed, 7 insertions(+), 57 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 0ebf0fd188..3fa7a98445 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1831,7 +1831,7 @@ class FederationHandler(BaseHandler): room_version = yield self.store.get_room_version(event.room_id) - new_state = self.state_handler.resolve_events( + new_state = yield self.state_handler.resolve_events( room_version, [list(local_view.values()), list(remote_view.values())], event diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index b34970e4d1..d7ae22a661 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -385,6 +385,7 @@ class StateHandler(object): ev_ids, get_prev_content=False, check_redacted=False, ) + @defer.inlineCallbacks def resolve_events(self, room_version, state_sets, event): logger.info( "Resolving state for %s with %d groups", event.room_id, len(state_sets) @@ -401,15 +402,17 @@ class StateHandler(object): } with Measure(self.clock, "state._resolve_events"): - new_state = resolve_events_with_state_map( - room_version, state_set_ids, state_map, + new_state = yield resolve_events_with_factory( + room_version, state_set_ids, + event_map=state_map, + state_map_factory=self._state_map_factory ) new_state = { key: state_map[ev_id] for key, ev_id in iteritems(new_state) } - return new_state + defer.returnValue(new_state) class StateResolutionHandler(object): @@ -589,31 +592,6 @@ def _make_state_cache_entry( ) -def resolve_events_with_state_map(room_version, state_sets, state_map): - """ - Args: - room_version(str): Version of the room - state_sets(list): List of dicts of (type, state_key) -> event_id, - which are the different state groups to resolve. - state_map(dict): a dict from event_id to event, for all events in - state_sets. 
- - Returns - dict[(str, str), str]: - a map from (type, state_key) to event_id. - """ - if room_version in (RoomVersions.V1, RoomVersions.VDH_TEST,): - return v1.resolve_events_with_state_map( - state_sets, state_map, - ) - else: - # This should only happen if we added a version but forgot to add it to - # the list above. - raise Exception( - "No state resolution algorithm defined for version %r" % (room_version,) - ) - - def resolve_events_with_factory(room_version, state_sets, event_map, state_map_factory): """ Args: diff --git a/synapse/state/v1.py b/synapse/state/v1.py index 3a1f7054a1..c95477d318 100644 --- a/synapse/state/v1.py +++ b/synapse/state/v1.py @@ -30,34 +30,6 @@ logger = logging.getLogger(__name__) POWER_KEY = (EventTypes.PowerLevels, "") -def resolve_events_with_state_map(state_sets, state_map): - """ - Args: - state_sets(list): List of dicts of (type, state_key) -> event_id, - which are the different state groups to resolve. - state_map(dict): a dict from event_id to event, for all events in - state_sets. - - Returns - dict[(str, str), str]: - a map from (type, state_key) to event_id. - """ - if len(state_sets) == 1: - return state_sets[0] - - unconflicted_state, conflicted_state = _seperate( - state_sets, - ) - - auth_events = _create_auth_events_from_maps( - unconflicted_state, conflicted_state, state_map - ) - - return _resolve_with_state( - unconflicted_state, conflicted_state, auth_events, state_map - ) - - @defer.inlineCallbacks def resolve_events_with_factory(state_sets, event_map, state_map_factory): """ From 0b07f02e19e5ed1eba0e74c3863def9f849b5f9a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 28 Aug 2018 13:39:49 +0100 Subject: [PATCH 003/138] Make sure that we close db connections opened during init We should explicitly close any db connections we open, because failing to do so can block other transactions as per https://github.com/matrix-org/synapse/issues/3682. Let's also try to factor out some of the boilerplate by having server classes define their datastore class rather than duplicating the whole of `setup`. 
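For orientation before the diff below, here is a minimal, self-contained sketch of the pattern this patch introduces. The names are stand-ins: `FakeSlavedStore` is hypothetical (the real subclasses point `DATASTORE_CLASS` at stores such as `AppserviceSlaveStore`), `get_db_conn` is stubbed with sqlite3, and `contextlib.closing` is used here purely to make the close explicit, whereas the real code relies on Synapse's own connection wrapper:

    import logging
    import sqlite3
    from contextlib import closing

    logger = logging.getLogger(__name__)


    class FakeSlavedStore(object):
        """Stand-in for per-app stores such as AppserviceSlaveStore."""

        def __init__(self, db_conn, hs):
            # The real stores only need the connection while initialising.
            self.prepared = db_conn is not None


    class HomeServer(object):
        # Derived application classes override this rather than each
        # duplicating the whole of setup().
        DATASTORE_CLASS = None

        def get_db_conn(self):
            return sqlite3.connect(":memory:")

        def setup(self):
            logger.info("Setting up.")
            # Explicitly close the init-time connection so it cannot block
            # other transactions (cf. matrix-org/synapse#3682).
            with closing(self.get_db_conn()) as conn:
                self.datastore = self.DATASTORE_CLASS(conn, self)
            logger.info("Finished setting up.")


    class AppserviceServer(HomeServer):
        DATASTORE_CLASS = FakeSlavedStore


    AppserviceServer().setup()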
--- synapse/app/appservice.py | 5 +---- synapse/app/client_reader.py | 5 +---- synapse/app/event_creator.py | 5 +---- synapse/app/federation_reader.py | 5 +---- synapse/app/federation_sender.py | 5 +---- synapse/app/frontend_proxy.py | 5 +---- synapse/app/homeserver.py | 14 ++++++++------ synapse/app/media_repository.py | 5 +---- synapse/app/pusher.py | 5 +---- synapse/app/synchrotron.py | 5 +---- synapse/app/user_dir.py | 5 +---- synapse/server.py | 15 +++++++++++++-- 12 files changed, 31 insertions(+), 48 deletions(-) diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py index 3348a8ec6d..86b5067400 100644 --- a/synapse/app/appservice.py +++ b/synapse/app/appservice.py @@ -51,10 +51,7 @@ class AppserviceSlaveStore( class AppserviceServer(HomeServer): - def setup(self): - logger.info("Setting up.") - self.datastore = AppserviceSlaveStore(self.get_db_conn(), self) - logger.info("Finished setting up.") + DATASTORE_CLASS = AppserviceSlaveStore def _listen_http(self, listener_config): port = listener_config["port"] diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index ab79a45646..ce2b113dbb 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -74,10 +74,7 @@ class ClientReaderSlavedStore( class ClientReaderServer(HomeServer): - def setup(self): - logger.info("Setting up.") - self.datastore = ClientReaderSlavedStore(self.get_db_conn(), self) - logger.info("Finished setting up.") + DATASTORE_CLASS = ClientReaderSlavedStore def _listen_http(self, listener_config): port = listener_config["port"] diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py index a34c89fa99..f98e456ea0 100644 --- a/synapse/app/event_creator.py +++ b/synapse/app/event_creator.py @@ -90,10 +90,7 @@ class EventCreatorSlavedStore( class EventCreatorServer(HomeServer): - def setup(self): - logger.info("Setting up.") - self.datastore = EventCreatorSlavedStore(self.get_db_conn(), self) - logger.info("Finished setting up.") + DATASTORE_CLASS = EventCreatorSlavedStore def _listen_http(self, listener_config): port = listener_config["port"] diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index 7d8105778d..60f5973505 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ -72,10 +72,7 @@ class FederationReaderSlavedStore( class FederationReaderServer(HomeServer): - def setup(self): - logger.info("Setting up.") - self.datastore = FederationReaderSlavedStore(self.get_db_conn(), self) - logger.info("Finished setting up.") + DATASTORE_CLASS = FederationReaderSlavedStore def _listen_http(self, listener_config): port = listener_config["port"] diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index d59007099b..60dd09aac3 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -78,10 +78,7 @@ class FederationSenderSlaveStore( class FederationSenderServer(HomeServer): - def setup(self): - logger.info("Setting up.") - self.datastore = FederationSenderSlaveStore(self.get_db_conn(), self) - logger.info("Finished setting up.") + DATASTORE_CLASS = FederationSenderSlaveStore def _listen_http(self, listener_config): port = listener_config["port"] diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index 8d484c1cd4..8c0b9c67b0 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -148,10 +148,7 @@ class FrontendProxySlavedStore( class FrontendProxyServer(HomeServer): - def setup(self): - 
logger.info("Setting up.") - self.datastore = FrontendProxySlavedStore(self.get_db_conn(), self) - logger.info("Finished setting up.") + DATASTORE_CLASS = FrontendProxySlavedStore def _listen_http(self, listener_config): port = listener_config["port"] diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 005921dcf7..3eb5b663de 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -62,7 +62,7 @@ from synapse.rest.key.v1.server_key_resource import LocalKey from synapse.rest.key.v2 import KeyApiV2Resource from synapse.rest.media.v0.content_repository import ContentRepoResource from synapse.server import HomeServer -from synapse.storage import are_all_users_on_domain +from synapse.storage import DataStore, are_all_users_on_domain from synapse.storage.engines import IncorrectDatabaseSetup, create_engine from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database from synapse.util.caches import CACHE_SIZE_FACTOR @@ -111,6 +111,8 @@ def build_resource_for_web_client(hs): class SynapseHomeServer(HomeServer): + DATASTORE_CLASS = DataStore + def _listener_http(self, config, listener_config): port = listener_config["port"] bind_addresses = listener_config["bind_addresses"] @@ -356,13 +358,13 @@ def setup(config_options): logger.info("Preparing database: %s...", config.database_config['name']) try: - db_conn = hs.get_db_conn(run_new_connection=False) - prepare_database(db_conn, database_engine, config=config) - database_engine.on_new_connection(db_conn) + with hs.get_db_conn(run_new_connection=False) as db_conn: + prepare_database(db_conn, database_engine, config=config) + database_engine.on_new_connection(db_conn) - hs.run_startup_checks(db_conn, database_engine) + hs.run_startup_checks(db_conn, database_engine) - db_conn.commit() + db_conn.commit() except UpgradeDatabaseException: sys.stderr.write( "\nFailed to upgrade database.\n" diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py index fd1f6cbf7e..e3dbb3b4e6 100644 --- a/synapse/app/media_repository.py +++ b/synapse/app/media_repository.py @@ -60,10 +60,7 @@ class MediaRepositorySlavedStore( class MediaRepositoryServer(HomeServer): - def setup(self): - logger.info("Setting up.") - self.datastore = MediaRepositorySlavedStore(self.get_db_conn(), self) - logger.info("Finished setting up.") + DATASTORE_CLASS = MediaRepositorySlavedStore def _listen_http(self, listener_config): port = listener_config["port"] diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index a4fc7e91fa..244c604de9 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -78,10 +78,7 @@ class PusherSlaveStore( class PusherServer(HomeServer): - def setup(self): - logger.info("Setting up.") - self.datastore = PusherSlaveStore(self.get_db_conn(), self) - logger.info("Finished setting up.") + DATASTORE_CLASS = PusherSlaveStore def remove_pusher(self, app_id, push_key, user_id): self.get_tcp_replication().send_remove_pusher(app_id, push_key, user_id) diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 27e1998660..6662340797 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -249,10 +249,7 @@ class SynchrotronApplicationService(object): class SynchrotronServer(HomeServer): - def setup(self): - logger.info("Setting up.") - self.datastore = SynchrotronSlavedStore(self.get_db_conn(), self) - logger.info("Finished setting up.") + DATASTORE_CLASS = SynchrotronSlavedStore def _listen_http(self, listener_config): port = 
listener_config["port"] diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py index 1388a42b59..96ffcaf073 100644 --- a/synapse/app/user_dir.py +++ b/synapse/app/user_dir.py @@ -94,10 +94,7 @@ class UserDirectorySlaveStore( class UserDirectoryServer(HomeServer): - def setup(self): - logger.info("Setting up.") - self.datastore = UserDirectorySlaveStore(self.get_db_conn(), self) - logger.info("Finished setting up.") + DATASTORE_CLASS = UserDirectorySlaveStore def _listen_http(self, listener_config): port = listener_config["port"] diff --git a/synapse/server.py b/synapse/server.py index a6fbc6ec0c..802d679cde 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -81,7 +81,6 @@ from synapse.server_notices.server_notices_manager import ServerNoticesManager from synapse.server_notices.server_notices_sender import ServerNoticesSender from synapse.server_notices.worker_server_notices_sender import WorkerServerNoticesSender from synapse.state import StateHandler, StateResolutionHandler -from synapse.storage import DataStore from synapse.streams.events import EventSources from synapse.util import Clock from synapse.util.distributor import Distributor @@ -172,6 +171,11 @@ class HomeServer(object): 'room_context_handler', ] + # This is overridden in derived application classes + # (such as synapse.app.homeserver.SynapseHomeServer) and gives the class to be + # instantiated during setup() for future return by get_datastore() + DATASTORE_CLASS = None + def __init__(self, hostname, reactor=None, **kwargs): """ Args: @@ -188,13 +192,20 @@ class HomeServer(object): self.distributor = Distributor() self.ratelimiter = Ratelimiter() + self.datastore = None + # Other kwargs are explicit dependencies for depname in kwargs: setattr(self, depname, kwargs[depname]) def setup(self): logger.info("Setting up.") - self.datastore = DataStore(self.get_db_conn(), self) + if self.DATASTORE_CLASS is None: + raise RuntimeError("%s does not define a DATASTORE_CLASS" % ( + self.__class__.__name__, + )) + with self.get_db_conn() as conn: + self.datastore = self.DATASTORE_CLASS(conn, self) logger.info("Finished setting up.") def get_reactor(self): From 71990b3caeeb6061d4419af516271af7a2b7b429 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 28 Aug 2018 13:42:20 +0100 Subject: [PATCH 004/138] changelog --- changelog.d/3764.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3764.misc diff --git a/changelog.d/3764.misc b/changelog.d/3764.misc new file mode 100644 index 0000000000..f3614f1982 --- /dev/null +++ b/changelog.d/3764.misc @@ -0,0 +1 @@ +Make sure that we close db connections opened during init \ No newline at end of file From 32eb1dedd294831f2bafc79a24dae91eee319563 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 28 Aug 2018 17:10:43 +0100 Subject: [PATCH 005/138] use abc.abstractproperty This gives clearer messages when someone gets it wrong --- synapse/server.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/synapse/server.py b/synapse/server.py index 802d679cde..938a05f9dc 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -19,6 +19,7 @@ # partial one for unit test mocking. 
# Imports required for the default HomeServer() implementation +import abc import logging from twisted.enterprise import adbapi @@ -110,6 +111,8 @@ class HomeServer(object): config (synapse.config.homeserver.HomeserverConfig): """ + __metaclass__ = abc.ABCMeta + DEPENDENCIES = [ 'http_client', 'db_pool', @@ -174,7 +177,7 @@ class HomeServer(object): # This is overridden in derived application classes # (such as synapse.app.homeserver.SynapseHomeServer) and gives the class to be # instantiated during setup() for future return by get_datastore() - DATASTORE_CLASS = None + DATASTORE_CLASS = abc.abstractproperty() def __init__(self, hostname, reactor=None, **kwargs): """ @@ -200,10 +203,6 @@ class HomeServer(object): def setup(self): logger.info("Setting up.") - if self.DATASTORE_CLASS is None: - raise RuntimeError("%s does not define a DATASTORE_CLASS" % ( - self.__class__.__name__, - )) with self.get_db_conn() as conn: self.datastore = self.DATASTORE_CLASS(conn, self) logger.info("Finished setting up.") From 414fa36f3e22d87ea6ce4bdd6af54bce5a0de67c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 28 Aug 2018 17:21:05 +0100 Subject: [PATCH 006/138] Fix up tests --- tests/storage/test_base.py | 4 ++-- tests/test_types.py | 4 ++-- tests/utils.py | 8 ++++++-- 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py index 7cb5f0e4cf..829f47d2e8 100644 --- a/tests/storage/test_base.py +++ b/tests/storage/test_base.py @@ -20,11 +20,11 @@ from mock import Mock from twisted.internet import defer -from synapse.server import HomeServer from synapse.storage._base import SQLBaseStore from synapse.storage.engines import create_engine from tests import unittest +from tests.utils import TestHomeServer class SQLBaseStoreTestCase(unittest.TestCase): @@ -51,7 +51,7 @@ class SQLBaseStoreTestCase(unittest.TestCase): config = Mock() config.event_cache_size = 1 config.database_config = {"name": "sqlite3"} - hs = HomeServer( + hs = TestHomeServer( "test", db_pool=self.db_pool, config=config, diff --git a/tests/test_types.py b/tests/test_types.py index be072d402b..0f5c8bfaf9 100644 --- a/tests/test_types.py +++ b/tests/test_types.py @@ -14,12 +14,12 @@ # limitations under the License. 
from synapse.api.errors import SynapseError -from synapse.server import HomeServer from synapse.types import GroupID, RoomAlias, UserID from tests import unittest +from tests.utils import TestHomeServer -mock_homeserver = HomeServer(hostname="my.domain") +mock_homeserver = TestHomeServer(hostname="my.domain") class UserIDTestCase(unittest.TestCase): diff --git a/tests/utils.py b/tests/utils.py index e8ef10445c..657c258a6e 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -29,7 +29,7 @@ from synapse.api.errors import CodeMessageException, cs_error from synapse.federation.transport import server from synapse.http.server import HttpServer from synapse.server import HomeServer -from synapse.storage import PostgresEngine +from synapse.storage import DataStore, PostgresEngine from synapse.storage.engines import create_engine from synapse.storage.prepare_database import ( _get_or_create_schema_state, @@ -92,10 +92,14 @@ def setupdb(): atexit.register(_cleanup) +class TestHomeServer(HomeServer): + DATASTORE_CLASS = DataStore + + @defer.inlineCallbacks def setup_test_homeserver( cleanup_func, name="test", datastore=None, config=None, reactor=None, - homeserverToUse=HomeServer, **kargs + homeserverToUse=TestHomeServer, **kargs ): """ Setup a homeserver suitable for running tests against. Keyword arguments From b1580f50fea05ce02dc4663adb23704b33e30bf3 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Tue, 28 Aug 2018 23:25:58 +0100 Subject: [PATCH 007/138] don't return non-LL-member state in incremental sync state blocks (#3760) don't return non-LL-member state in incremental sync state blocks --- changelog.d/3760.bugfix | 1 + synapse/handlers/sync.py | 9 ++++++++- 2 files changed, 9 insertions(+), 1 deletion(-) create mode 100644 changelog.d/3760.bugfix diff --git a/changelog.d/3760.bugfix b/changelog.d/3760.bugfix new file mode 100644 index 0000000000..ce61fb8a2b --- /dev/null +++ b/changelog.d/3760.bugfix @@ -0,0 +1 @@ +Don't return non-LL-member state in incremental sync state blocks diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 648debc8aa..ef20c2296c 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -745,9 +745,16 @@ class SyncHandler(object): state_ids = {} if lazy_load_members: if types: + # We're returning an incremental sync, with no "gap" since + # the previous sync, so normally there would be no state to return + # But we're lazy-loading, so the client might need some more + # member events to understand the events in this timeline. + # So we fish out all the member events corresponding to the + # timeline here, and then dedupe any redundant ones below. + state_ids = yield self.store.get_state_ids_for_event( batch.events[0].event_id, types=types, - filtered_types=filtered_types, + filtered_types=None, # we only want members! ) if lazy_load_members and not include_redundant_members: From 82276a18d10e8b66a509344a94cc328584cee46f Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 29 Aug 2018 19:06:59 +1000 Subject: [PATCH 008/138] Update CONTRIBUTING to clarify miscs & Markdown (#3730) --- CONTRIBUTING.rst | 7 ++++--- changelog.d/3730.misc | 1 + 2 files changed, 5 insertions(+), 3 deletions(-) create mode 100644 changelog.d/3730.misc diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 3d75853aa7..f9de78a460 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -59,9 +59,10 @@ To create a changelog entry, make a new file in the ``changelog.d`` file named in the format of ``PRnumber.type``. 
The type can be one of ``feature``, ``bugfix``, ``removal`` (also used for deprecations), or ``misc`` (for internal-only changes). The content of -the file is your changelog entry, which can contain RestructuredText -formatting. A note of contributors is welcomed in changelogs for -non-misc changes (the content of misc changes is not displayed). +the file is your changelog entry, which can contain Markdown +formatting. Adding credits to the changelog is encouraged, we value +your contributions and would like to have you shouted out in the +release notes! For example, a fix in PR #1234 would have its changelog entry in ``changelog.d/1234.bugfix``, and contain content like "The security levels of diff --git a/changelog.d/3730.misc b/changelog.d/3730.misc new file mode 100644 index 0000000000..b1ea84f732 --- /dev/null +++ b/changelog.d/3730.misc @@ -0,0 +1 @@ +The CONTRIBUTING guidelines have been updated to mention our use of Markdown and that .misc files have content. From 79a8a347a6c55a3ec6255227930dd9aad4b0b78f Mon Sep 17 00:00:00 2001 From: Krombel Date: Wed, 29 Aug 2018 16:26:21 +0200 Subject: [PATCH 009/138] fix #3445 itervalues(d) calls d.itervalues() [PY2] and d.values() [PY3] but SortedDict only implements d.values() --- changelog.d/3445.bugfix | 1 + synapse/federation/send_queue.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/3445.bugfix diff --git a/changelog.d/3445.bugfix b/changelog.d/3445.bugfix new file mode 100644 index 0000000000..f7f1c4778d --- /dev/null +++ b/changelog.d/3445.bugfix @@ -0,0 +1 @@ +do not use six.itervalues() on SortedDict() diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index 0bb468385d..6f5995735a 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -32,7 +32,7 @@ Events are replicated via a separate events stream. 
import logging from collections import namedtuple -from six import iteritems, itervalues +from six import iteritems from sortedcontainers import SortedDict @@ -117,7 +117,7 @@ class FederationRemoteSendQueue(object): user_ids = set( user_id - for uids in itervalues(self.presence_changed) + for uids in self.presence_changed.values() for user_id in uids ) From 8c0c51ecb337c4dbbec371375e9c376fb2f666f7 Mon Sep 17 00:00:00 2001 From: Krombel Date: Wed, 29 Aug 2018 16:49:26 +0200 Subject: [PATCH 010/138] changelog --- changelog.d/3445.bugfix | 1 - changelog.d/3768.bugfix | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) delete mode 100644 changelog.d/3445.bugfix create mode 100644 changelog.d/3768.bugfix diff --git a/changelog.d/3445.bugfix b/changelog.d/3445.bugfix deleted file mode 100644 index f7f1c4778d..0000000000 --- a/changelog.d/3445.bugfix +++ /dev/null @@ -1 +0,0 @@ -do not use six.itervalues() on SortedDict() diff --git a/changelog.d/3768.bugfix b/changelog.d/3768.bugfix new file mode 100644 index 0000000000..a039a7fa68 --- /dev/null +++ b/changelog.d/3768.bugfix @@ -0,0 +1 @@ +Fix bug in sending presence over federation From 14e4d4f4bf894f4e70118e03f4f4575e1eb6dab6 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 31 Aug 2018 00:19:58 +1000 Subject: [PATCH 011/138] Port storage/ to Python 3 (#3725) --- changelog.d/3725.misc | 1 + jenkins/prepare_synapse.sh | 2 +- synapse/python_dependencies.py | 3 + synapse/storage/_base.py | 32 ++++++++- synapse/storage/deviceinbox.py | 2 +- synapse/storage/devices.py | 10 +-- synapse/storage/end_to_end_keys.py | 6 +- synapse/storage/engines/postgres.py | 9 ++- synapse/storage/events.py | 10 +-- synapse/storage/events_worker.py | 9 +-- synapse/storage/filtering.py | 6 +- synapse/storage/pusher.py | 14 ++-- synapse/storage/transactions.py | 7 +- tests/rest/client/v1/utils.py | 14 ++-- tests/storage/test_purge.py | 106 ++++++++++++++++++++++++++++ tests/unittest.py | 11 +++ tests/utils.py | 2 + 17 files changed, 208 insertions(+), 36 deletions(-) create mode 100644 changelog.d/3725.misc create mode 100644 tests/storage/test_purge.py diff --git a/changelog.d/3725.misc b/changelog.d/3725.misc new file mode 100644 index 0000000000..91ab9d7137 --- /dev/null +++ b/changelog.d/3725.misc @@ -0,0 +1 @@ +The synapse.storage module has been ported to Python 3. 
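Two recurring Python 3 fixes in the hunks that follow deserve a note: `dict.keys()` now returns a non-indexable view, and `zip()` returns a one-shot iterator. A quick illustration (with made-up data) of why the `list(...)` wrapping appears in the `deviceinbox.py` and `events_worker.py` hunks below:

    messages_by_device = {"*": ["message one"]}

    # Python 3: .keys() returns a view, and views cannot be indexed, so
    # devices[0] would raise TypeError without the list() call.
    devices = list(messages_by_device.keys())
    assert len(devices) == 1 and devices[0] == "*"

    event_list = [(["$ev1", "$ev2"], None), (["$ev3"], None)]

    # Python 3: zip() returns a one-shot iterator, so it has to be
    # materialised before it can be indexed.
    event_id_lists = list(zip(*event_list))[0]
    assert event_id_lists == (["$ev1", "$ev2"], ["$ev3"])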
diff --git a/jenkins/prepare_synapse.sh b/jenkins/prepare_synapse.sh index a30179f2aa..d95ca846c4 100755 --- a/jenkins/prepare_synapse.sh +++ b/jenkins/prepare_synapse.sh @@ -31,5 +31,5 @@ $TOX_BIN/pip install 'setuptools>=18.5' $TOX_BIN/pip install 'pip>=10' { python synapse/python_dependencies.py - echo lxml psycopg2 + echo lxml } | xargs $TOX_BIN/pip install diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 9c55e79ef5..942d7c721f 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -78,6 +78,9 @@ CONDITIONAL_REQUIREMENTS = { "affinity": { "affinity": ["affinity"], }, + "postgres": { + "psycopg2>=2.6": ["psycopg2"] + } } diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 08dffd774f..be61147b9b 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -17,9 +17,10 @@ import sys import threading import time -from six import iteritems, iterkeys, itervalues +from six import PY2, iteritems, iterkeys, itervalues from six.moves import intern, range +from canonicaljson import json from prometheus_client import Histogram from twisted.internet import defer @@ -1216,3 +1217,32 @@ class _RollbackButIsFineException(Exception): something went wrong. """ pass + + +def db_to_json(db_content): + """ + Take some data from a database row and return a JSON-decoded object. + + Args: + db_content (memoryview|buffer|bytes|bytearray|unicode) + """ + # psycopg2 on Python 3 returns memoryview objects, which we need to + # cast to bytes to decode + if isinstance(db_content, memoryview): + db_content = db_content.tobytes() + + # psycopg2 on Python 2 returns buffer objects, which we need to cast to + # bytes to decode + if PY2 and isinstance(db_content, buffer): + db_content = bytes(db_content) + + # Decode it to a Unicode string before feeding it to json.loads, so we + # consistenty get a Unicode-containing object out. + if isinstance(db_content, (bytes, bytearray)): + db_content = db_content.decode('utf8') + + try: + return json.loads(db_content) + except Exception: + logging.warning("Tried to decode '%r' as JSON and failed", db_content) + raise diff --git a/synapse/storage/deviceinbox.py b/synapse/storage/deviceinbox.py index 73646da025..e06b0bc56d 100644 --- a/synapse/storage/deviceinbox.py +++ b/synapse/storage/deviceinbox.py @@ -169,7 +169,7 @@ class DeviceInboxStore(BackgroundUpdateStore): local_by_user_then_device = {} for user_id, messages_by_device in messages_by_user_then_device.items(): messages_json_for_user = {} - devices = messages_by_device.keys() + devices = list(messages_by_device.keys()) if len(devices) == 1 and devices[0] == "*": # Handle wildcard device_ids. 
sql = ( diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py index c0943ecf91..d10ff9e4b9 100644 --- a/synapse/storage/devices.py +++ b/synapse/storage/devices.py @@ -24,7 +24,7 @@ from synapse.api.errors import StoreError from synapse.metrics.background_process_metrics import run_as_background_process from synapse.util.caches.descriptors import cached, cachedInlineCallbacks, cachedList -from ._base import Cache, SQLBaseStore +from ._base import Cache, SQLBaseStore, db_to_json logger = logging.getLogger(__name__) @@ -411,7 +411,7 @@ class DeviceStore(SQLBaseStore): if device is not None: key_json = device.get("key_json", None) if key_json: - result["keys"] = json.loads(key_json) + result["keys"] = db_to_json(key_json) device_display_name = device.get("device_display_name", None) if device_display_name: result["device_display_name"] = device_display_name @@ -466,7 +466,7 @@ class DeviceStore(SQLBaseStore): retcol="content", desc="_get_cached_user_device", ) - defer.returnValue(json.loads(content)) + defer.returnValue(db_to_json(content)) @cachedInlineCallbacks() def _get_cached_devices_for_user(self, user_id): @@ -479,7 +479,7 @@ class DeviceStore(SQLBaseStore): desc="_get_cached_devices_for_user", ) defer.returnValue({ - device["device_id"]: json.loads(device["content"]) + device["device_id"]: db_to_json(device["content"]) for device in devices }) @@ -511,7 +511,7 @@ class DeviceStore(SQLBaseStore): key_json = device.get("key_json", None) if key_json: - result["keys"] = json.loads(key_json) + result["keys"] = db_to_json(key_json) device_display_name = device.get("device_display_name", None) if device_display_name: result["device_display_name"] = device_display_name diff --git a/synapse/storage/end_to_end_keys.py b/synapse/storage/end_to_end_keys.py index 523b4360c3..1f1721e820 100644 --- a/synapse/storage/end_to_end_keys.py +++ b/synapse/storage/end_to_end_keys.py @@ -14,13 +14,13 @@ # limitations under the License. from six import iteritems -from canonicaljson import encode_canonical_json, json +from canonicaljson import encode_canonical_json from twisted.internet import defer from synapse.util.caches.descriptors import cached -from ._base import SQLBaseStore +from ._base import SQLBaseStore, db_to_json class EndToEndKeyStore(SQLBaseStore): @@ -90,7 +90,7 @@ class EndToEndKeyStore(SQLBaseStore): for user_id, device_keys in iteritems(results): for device_id, device_info in iteritems(device_keys): - device_info["keys"] = json.loads(device_info.pop("key_json")) + device_info["keys"] = db_to_json(device_info.pop("key_json")) defer.returnValue(results) diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py index 8a0386c1a4..42225f8a2a 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py @@ -41,13 +41,18 @@ class PostgresEngine(object): db_conn.set_isolation_level( self.module.extensions.ISOLATION_LEVEL_REPEATABLE_READ ) + + # Set the bytea output to escape, vs the default of hex + cursor = db_conn.cursor() + cursor.execute("SET bytea_output TO escape") + # Asynchronous commit, don't wait for the server to call fsync before # ending the transaction. 
# https://www.postgresql.org/docs/current/static/wal-async-commit.html if not self.synchronous_commit: - cursor = db_conn.cursor() cursor.execute("SET synchronous_commit TO OFF") - cursor.close() + + cursor.close() def is_deadlock(self, error): if isinstance(error, self.module.DatabaseError): diff --git a/synapse/storage/events.py b/synapse/storage/events.py index f39c8c8461..8bf87f38f7 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -19,7 +19,7 @@ import logging from collections import OrderedDict, deque, namedtuple from functools import wraps -from six import iteritems +from six import iteritems, text_type from six.moves import range from canonicaljson import json @@ -1220,7 +1220,7 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore "sender": event.sender, "contains_url": ( "url" in event.content - and isinstance(event.content["url"], basestring) + and isinstance(event.content["url"], text_type) ), } for event, _ in events_and_contexts @@ -1529,7 +1529,7 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore contains_url = "url" in content if contains_url: - contains_url &= isinstance(content["url"], basestring) + contains_url &= isinstance(content["url"], text_type) except (KeyError, AttributeError): # If the event is missing a necessary field then # skip over it. @@ -1910,9 +1910,9 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore (room_id,) ) rows = txn.fetchall() - max_depth = max(row[0] for row in rows) + max_depth = max(row[1] for row in rows) - if max_depth <= token.topological: + if max_depth < token.topological: # We need to ensure we don't delete all the events from the database # otherwise we wouldn't be able to send any events (due to not # having any backwards extremeties) diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index 59822178ff..a8326f5296 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -12,6 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + import itertools import logging from collections import namedtuple @@ -265,7 +266,7 @@ class EventsWorkerStore(SQLBaseStore): """ with Measure(self._clock, "_fetch_event_list"): try: - event_id_lists = zip(*event_list)[0] + event_id_lists = list(zip(*event_list))[0] event_ids = [ item for sublist in event_id_lists for item in sublist ] @@ -299,14 +300,14 @@ class EventsWorkerStore(SQLBaseStore): logger.exception("do_fetch") # We only want to resolve deferreds from the main thread - def fire(evs): + def fire(evs, exc): for _, d in evs: if not d.called: with PreserveLoggingContext(): - d.errback(e) + d.errback(exc) with PreserveLoggingContext(): - self.hs.get_reactor().callFromThread(fire, event_list) + self.hs.get_reactor().callFromThread(fire, event_list, e) @defer.inlineCallbacks def _enqueue_events(self, events, check_redacted=True, allow_rejected=False): diff --git a/synapse/storage/filtering.py b/synapse/storage/filtering.py index 2d5896c5b4..6ddcc909bf 100644 --- a/synapse/storage/filtering.py +++ b/synapse/storage/filtering.py @@ -13,14 +13,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from canonicaljson import encode_canonical_json, json +from canonicaljson import encode_canonical_json from twisted.internet import defer from synapse.api.errors import Codes, SynapseError from synapse.util.caches.descriptors import cachedInlineCallbacks -from ._base import SQLBaseStore +from ._base import SQLBaseStore, db_to_json class FilteringStore(SQLBaseStore): @@ -44,7 +44,7 @@ class FilteringStore(SQLBaseStore): desc="get_user_filter", ) - defer.returnValue(json.loads(bytes(def_json).decode("utf-8"))) + defer.returnValue(db_to_json(def_json)) def add_user_filter(self, user_localpart, user_filter): def_json = encode_canonical_json(user_filter) diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py index 8443bd4c1b..c7987bfcdd 100644 --- a/synapse/storage/pusher.py +++ b/synapse/storage/pusher.py @@ -15,7 +15,8 @@ # limitations under the License. import logging -import types + +import six from canonicaljson import encode_canonical_json, json @@ -27,6 +28,11 @@ from ._base import SQLBaseStore logger = logging.getLogger(__name__) +if six.PY2: + db_binary_type = buffer +else: + db_binary_type = memoryview + class PusherWorkerStore(SQLBaseStore): def _decode_pushers_rows(self, rows): @@ -34,18 +40,18 @@ class PusherWorkerStore(SQLBaseStore): dataJson = r['data'] r['data'] = None try: - if isinstance(dataJson, types.BufferType): + if isinstance(dataJson, db_binary_type): dataJson = str(dataJson).decode("UTF8") r['data'] = json.loads(dataJson) except Exception as e: logger.warn( "Invalid JSON in data for pusher %d: %s, %s", - r['id'], dataJson, e.message, + r['id'], dataJson, e.args[0], ) pass - if isinstance(r['pushkey'], types.BufferType): + if isinstance(r['pushkey'], db_binary_type): r['pushkey'] = str(r['pushkey']).decode("UTF8") return rows diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py index 428e7fa36e..0c42bd3322 100644 --- a/synapse/storage/transactions.py +++ b/synapse/storage/transactions.py @@ -18,14 +18,14 @@ from collections import namedtuple import six -from canonicaljson import encode_canonical_json, json +from canonicaljson import encode_canonical_json from twisted.internet import defer from synapse.metrics.background_process_metrics import run_as_background_process from synapse.util.caches.descriptors import cached -from ._base import SQLBaseStore +from ._base import SQLBaseStore, db_to_json # py2 sqlite has buffer hardcoded as only binary type, so we must use it, # despite being deprecated and removed in favor of memoryview @@ -95,7 +95,8 @@ class TransactionStore(SQLBaseStore): ) if result and result["response_code"]: - return result["response_code"], json.loads(str(result["response_json"])) + return result["response_code"], db_to_json(result["response_json"]) + else: return None diff --git a/tests/rest/client/v1/utils.py b/tests/rest/client/v1/utils.py index 40dc4ea256..530dc8ba6d 100644 --- a/tests/rest/client/v1/utils.py +++ b/tests/rest/client/v1/utils.py @@ -240,7 +240,6 @@ class RestHelper(object): self.assertEquals(200, code) defer.returnValue(response) - @defer.inlineCallbacks def send(self, room_id, body=None, txn_id=None, tok=None, expect_code=200): if txn_id is None: txn_id = "m%s" % (str(time.time())) @@ -248,9 +247,16 @@ class RestHelper(object): body = "body_text_here" path = "/_matrix/client/r0/rooms/%s/send/m.room.message/%s" % (room_id, txn_id) - content = '{"msgtype":"m.text","body":"%s"}' % body + content = {"msgtype": "m.text", "body": body} if tok: path = path + "?access_token=%s" % tok - (code, response) 
= yield self.mock_resource.trigger("PUT", path, content) - self.assertEquals(expect_code, code, msg=str(response)) + request, channel = make_request("PUT", path, json.dumps(content).encode('utf8')) + render(request, self.resource, self.hs.get_reactor()) + + assert int(channel.result["code"]) == expect_code, ( + "Expected: %d, got: %d, resp: %r" + % (expect_code, int(channel.result["code"]), channel.result["body"]) + ) + + return channel.json_body diff --git a/tests/storage/test_purge.py b/tests/storage/test_purge.py new file mode 100644 index 0000000000..f671599cb8 --- /dev/null +++ b/tests/storage/test_purge.py @@ -0,0 +1,106 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.rest.client.v1 import room + +from tests.unittest import HomeserverTestCase + + +class PurgeTests(HomeserverTestCase): + + user_id = "@red:server" + servlets = [room.register_servlets] + + def make_homeserver(self, reactor, clock): + hs = self.setup_test_homeserver("server", http_client=None) + return hs + + def prepare(self, reactor, clock, hs): + self.room_id = self.helper.create_room_as(self.user_id) + + def test_purge(self): + """ + Purging a room will delete everything before the topological point. + """ + # Send four messages to the room + first = self.helper.send(self.room_id, body="test1") + second = self.helper.send(self.room_id, body="test2") + third = self.helper.send(self.room_id, body="test3") + last = self.helper.send(self.room_id, body="test4") + + storage = self.hs.get_datastore() + + # Get the topological token + event = storage.get_topological_token_for_event(last["event_id"]) + self.pump() + event = self.successResultOf(event) + + # Purge everything before this topological token + purge = storage.purge_history(self.room_id, event, True) + self.pump() + self.assertEqual(self.successResultOf(purge), None) + + # Try and get the events + get_first = storage.get_event(first["event_id"]) + get_second = storage.get_event(second["event_id"]) + get_third = storage.get_event(third["event_id"]) + get_last = storage.get_event(last["event_id"]) + self.pump() + + # 1-3 should fail and last will succeed, meaning that 1-3 are deleted + # and last is not. + self.failureResultOf(get_first) + self.failureResultOf(get_second) + self.failureResultOf(get_third) + self.successResultOf(get_last) + + def test_purge_wont_delete_extrems(self): + """ + Purging a room will delete everything before the topological point. 
+ """ + # Send four messages to the room + first = self.helper.send(self.room_id, body="test1") + second = self.helper.send(self.room_id, body="test2") + third = self.helper.send(self.room_id, body="test3") + last = self.helper.send(self.room_id, body="test4") + + storage = self.hs.get_datastore() + + # Set the topological token higher than it should be + event = storage.get_topological_token_for_event(last["event_id"]) + self.pump() + event = self.successResultOf(event) + event = "t{}-{}".format( + *list(map(lambda x: x + 1, map(int, event[1:].split("-")))) + ) + + # Purge everything before this topological token + purge = storage.purge_history(self.room_id, event, True) + self.pump() + f = self.failureResultOf(purge) + self.assertIn("greater than forward", f.value.args[0]) + + # Try and get the events + get_first = storage.get_event(first["event_id"]) + get_second = storage.get_event(second["event_id"]) + get_third = storage.get_event(third["event_id"]) + get_last = storage.get_event(last["event_id"]) + self.pump() + + # Nothing is deleted. + self.successResultOf(get_first) + self.successResultOf(get_second) + self.successResultOf(get_third) + self.successResultOf(get_last) diff --git a/tests/unittest.py b/tests/unittest.py index d852e2465a..8b513bb32b 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -151,6 +151,7 @@ class HomeserverTestCase(TestCase): hijack_auth (bool): Whether to hijack auth to return the user specified in user_id. """ + servlets = [] hijack_auth = True @@ -279,3 +280,13 @@ class HomeserverTestCase(TestCase): kwargs = dict(kwargs) kwargs.update(self._hs_args) return setup_test_homeserver(self.addCleanup, *args, **kwargs) + + def pump(self): + """ + Pump the reactor enough that Deferreds will fire. + """ + self.reactor.pump([0.0] * 100) + + def get_success(self, d): + self.pump() + return self.successResultOf(d) diff --git a/tests/utils.py b/tests/utils.py index 657c258a6e..179b592501 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -147,6 +147,8 @@ def setup_test_homeserver( config.max_mau_value = 50 config.mau_limits_reserved_threepids = [] config.admin_contact = None + config.rc_messages_per_second = 10000 + config.rc_message_burst_count = 10000 # we need a sane default_room_version, otherwise attempts to create rooms will # fail. 
From ea068d6f3cd5ed1bc9a39b2fd43e19d6d40f18da Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Fri, 31 Aug 2018 10:49:14 +0100 Subject: [PATCH 012/138] fix bug where preserved threepid user comes to sign up and server is mau blocked --- synapse/api/auth.py | 10 +++++++++- synapse/handlers/register.py | 3 ++- synapse/rest/client/v1_only/register.py | 6 +++++- synapse/rest/client/v2_alpha/register.py | 5 +++++ tests/api/test_auth.py | 17 +++++++++++++++++ 5 files changed, 38 insertions(+), 3 deletions(-) diff --git a/synapse/api/auth.py b/synapse/api/auth.py index a7e3f7a7ac..9c207b9537 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -775,7 +775,7 @@ class Auth(object): ) @defer.inlineCallbacks - def check_auth_blocking(self, user_id=None): + def check_auth_blocking(self, user_id=None, threepid=None): """Checks if the user should be rejected for some external reason, such as monthly active user limiting or global disable flag @@ -806,6 +806,14 @@ class Auth(object): is_trial = yield self.store.is_trial_user(user_id) if is_trial: return + elif threepid: + # If the user does not exist yet, but is signing up with a + # reserved threepid then pass auth check + for tp in self.hs.config.mau_limits_reserved_threepids: + if (threepid['medium'] == tp['medium'] + and threepid['address'] == tp['address']): + return + # Else if there is no room in the MAU bucket, bail current_mau = yield self.store.get_monthly_active_count() if current_mau >= self.hs.config.max_mau_value: diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index f03ee1476b..1e53f2c635 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -125,6 +125,7 @@ class RegistrationHandler(BaseHandler): guest_access_token=None, make_guest=False, admin=False, + threepid=None, ): """Registers a new client on the server. @@ -145,7 +146,7 @@ class RegistrationHandler(BaseHandler): RegistrationError if there was a problem registering. 
""" - yield self.auth.check_auth_blocking() + yield self.auth.check_auth_blocking(threepid=threepid) password_hash = None if password: password_hash = yield self.auth_handler().hash(password) diff --git a/synapse/rest/client/v1_only/register.py b/synapse/rest/client/v1_only/register.py index 5e99cffbcb..2c7bbcb171 100644 --- a/synapse/rest/client/v1_only/register.py +++ b/synapse/rest/client/v1_only/register.py @@ -281,11 +281,15 @@ class RegisterRestServlet(ClientV1RestServlet): register_json["user"].encode("utf-8") if "user" in register_json else None ) + threepid = None + if session[LoginType.EMAIL_IDENTITY]: + threepid = session["threepidCreds"] handler = self.handlers.registration_handler (user_id, token) = yield handler.register( localpart=desired_user_id, - password=password + password=password, + threepid=threepid, ) if session[LoginType.EMAIL_IDENTITY]: diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 2f64155d13..45113e5386 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -395,11 +395,16 @@ class RegisterRestServlet(RestServlet): if desired_username is not None: desired_username = desired_username.lower() + threepid = None + if auth_result: + threepid = auth_result.get(LoginType.EMAIL_IDENTITY) + (registered_user_id, _) = yield self.registration_handler.register( localpart=desired_username, password=new_password, guest_access_token=guest_access_token, generate_token=False, + threepid=threepid, ) # remember that we've now registered that user account, and with diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index 54e396d19d..f65a27e5f1 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -467,6 +467,23 @@ class AuthTestCase(unittest.TestCase): ) yield self.auth.check_auth_blocking() + @defer.inlineCallbacks + def test_reserved_threepid(self): + self.hs.config.limit_usage_by_mau = True + self.hs.config.max_mau_value = 1 + threepid = {'medium': 'email', 'address': 'reserved@server.com'} + unknown_threepid = {'medium': 'email', 'address': 'unreserved@server.com'} + self.hs.config.mau_limits_reserved_threepids = [threepid] + + yield self.store.register(user_id='user1', token="123", password_hash=None) + with self.assertRaises(ResourceLimitError): + yield self.auth.check_auth_blocking() + + with self.assertRaises(ResourceLimitError): + yield self.auth.check_auth_blocking(threepid=unknown_threepid) + + yield self.auth.check_auth_blocking(threepid=threepid) + @defer.inlineCallbacks def test_hs_disabled(self): self.hs.config.hs_disabled = True From 3d6aa06577e170a8af5f0e19a3102d4210730c8f Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Fri, 31 Aug 2018 10:56:37 +0100 Subject: [PATCH 013/138] news fragemnt --- changelog.d/3777.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3777.bugfix diff --git a/changelog.d/3777.bugfix b/changelog.d/3777.bugfix new file mode 100644 index 0000000000..93b5b8fa57 --- /dev/null +++ b/changelog.d/3777.bugfix @@ -0,0 +1 @@ +Fix fix bug where preserved threepid user comes to sign up and server is mau blocked From 09f3cf1a7ef0c533d052a5c87257503b710093c6 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Fri, 31 Aug 2018 15:42:51 +0100 Subject: [PATCH 014/138] ensure post registration auth checks do not fail erroneously --- synapse/api/auth.py | 7 ++----- synapse/rest/client/v1_only/register.py | 4 ++++ synapse/rest/client/v2_alpha/register.py | 4 ++++ synapse/storage/monthly_active_users.py | 15 
++++++++++++++- 4 files changed, 24 insertions(+), 6 deletions(-) diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 9c207b9537..6a97c06110 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -809,11 +809,8 @@ class Auth(object): elif threepid: # If the user does not exist yet, but is signing up with a # reserved threepid then pass auth check - for tp in self.hs.config.mau_limits_reserved_threepids: - if (threepid['medium'] == tp['medium'] - and threepid['address'] == tp['address']): - return - + if is_threepid_reserved(threepid): + return # Else if there is no room in the MAU bucket, bail current_mau = yield self.store.get_monthly_active_count() if current_mau >= self.hs.config.max_mau_value: diff --git a/synapse/rest/client/v1_only/register.py b/synapse/rest/client/v1_only/register.py index 2c7bbcb171..95873e03d5 100644 --- a/synapse/rest/client/v1_only/register.py +++ b/synapse/rest/client/v1_only/register.py @@ -291,6 +291,10 @@ class RegisterRestServlet(ClientV1RestServlet): password=password, threepid=threepid, ) + # Necessary due to auth checks prior to the threepid being + # written to the db + if self.store.is_threepid_reserved(threepid): + self.store.upsert_monthly_active_user(registered_user_id) if session[LoginType.EMAIL_IDENTITY]: logger.debug("Binding emails %s to %s" % ( diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 45113e5386..f22b7577ea 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -406,6 +406,10 @@ class RegisterRestServlet(RestServlet): generate_token=False, threepid=threepid, ) + # Necessary due to auth checks prior to the threepid being + # written to the db + if self.store.is_threepid_reserved(threepid): + self.store.upsert_monthly_active_user(registered_user_id) # remember that we've now registered that user account, and with # what user ID (since the user may not have specified) diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py index d178f5c5ba..173867c4b1 100644 --- a/synapse/storage/monthly_active_users.py +++ b/synapse/storage/monthly_active_users.py @@ -36,7 +36,6 @@ class MonthlyActiveUsersStore(SQLBaseStore): @defer.inlineCallbacks def initialise_reserved_users(self, threepids): - # TODO Why can't I do this in init? 
store = self.hs.get_datastore() reserved_user_list = [] @@ -220,3 +219,17 @@ class MonthlyActiveUsersStore(SQLBaseStore): yield self.upsert_monthly_active_user(user_id) elif now - last_seen_timestamp > LAST_SEEN_GRANULARITY: yield self.upsert_monthly_active_user(user_id) + + def is_threepid_reserved(self, threepid): + """Check the threepid against the reserved threepid config + Args: + threepid(dict) - The threepid to test for + Returns: + boolean Is the threepid undertest reserved_user + """ + for tp in self.hs.config.mau_limits_reserved_threepids: + if (threepid['medium'] == tp['medium'] + and threepid['address'] == tp['address']): + return True + else: + return False From a796bdd35efbd17c4a1fa48e1245f6d57403b232 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Fri, 31 Aug 2018 15:57:33 +0100 Subject: [PATCH 015/138] typo --- changelog.d/3777.bugfix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/3777.bugfix b/changelog.d/3777.bugfix index 93b5b8fa57..46efc543a7 100644 --- a/changelog.d/3777.bugfix +++ b/changelog.d/3777.bugfix @@ -1 +1 @@ -Fix fix bug where preserved threepid user comes to sign up and server is mau blocked +Fix bug where preserved threepid user comes to sign up and server is mau blocked From e8e540630ec96d6ed856fb143dbb62b91e15084d Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Fri, 31 Aug 2018 16:09:15 +0100 Subject: [PATCH 016/138] fix reference to is_threepid_reserved --- synapse/api/auth.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 6a97c06110..a36fb6b3bd 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -809,7 +809,7 @@ class Auth(object): elif threepid: # If the user does not exist yet, but is signing up with a # reserved threepid then pass auth check - if is_threepid_reserved(threepid): + if self.store.is_threepid_reserved(threepid): return # Else if there is no room in the MAU bucket, bail current_mau = yield self.store.get_monthly_active_count() From 0b01281e77aee7e69925f36dbb6a798772a98a45 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Fri, 31 Aug 2018 17:11:11 +0100 Subject: [PATCH 017/138] move threepid checker to config, add missing yields --- synapse/api/auth.py | 13 +++++++++++-- synapse/config/server.py | 17 +++++++++++++++++ synapse/rest/client/v1_only/register.py | 7 ++++--- synapse/rest/client/v2_alpha/register.py | 5 +++-- synapse/storage/monthly_active_users.py | 14 -------------- tests/utils.py | 6 ++++++ 6 files changed, 41 insertions(+), 21 deletions(-) diff --git a/synapse/api/auth.py b/synapse/api/auth.py index a36fb6b3bd..a89687f420 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -26,6 +26,7 @@ import synapse.types from synapse import event_auth from synapse.api.constants import EventTypes, JoinRules, Membership from synapse.api.errors import AuthError, Codes, ResourceLimitError +from synapse.config.server import is_threepid_reserved from synapse.types import UserID from synapse.util.caches import CACHE_SIZE_FACTOR, register_cache from synapse.util.caches.lrucache import LruCache @@ -782,6 +783,11 @@ class Auth(object): Args: user_id(str|None): If present, checks for presence against existing MAU cohort + threepid(dict|None): If present, checks for presence against configured + reserved threepid. Used in cases where the user is trying register + with a MAU blocked server, normally they would be rejected but their + threepid is on the reserved list. user_id and + threepid should never be set at the same time. 
""" # Never fail an auth check for the server notices users @@ -797,6 +803,10 @@ class Auth(object): limit_type=self.hs.config.hs_disabled_limit_type ) if self.hs.config.limit_usage_by_mau is True: + + if user_id and threepid: + logger.warn("Called with both user_id and threepid, this shoudn't happen") + # If the user is already part of the MAU cohort or a trial user if user_id: timestamp = yield self.store.user_last_seen_monthly_active(user_id) @@ -809,14 +819,13 @@ class Auth(object): elif threepid: # If the user does not exist yet, but is signing up with a # reserved threepid then pass auth check - if self.store.is_threepid_reserved(threepid): + if is_threepid_reserved(self.hs.config, threepid): return # Else if there is no room in the MAU bucket, bail current_mau = yield self.store.get_monthly_active_count() if current_mau >= self.hs.config.max_mau_value: raise ResourceLimitError( 403, "Monthly Active User Limit Exceeded", - admin_contact=self.hs.config.admin_contact, errcode=Codes.RESOURCE_LIMIT_EXCEEDED, limit_type="monthly_active_user" diff --git a/synapse/config/server.py b/synapse/config/server.py index 2faf472189..c1c7c0105e 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -404,6 +404,23 @@ class ServerConfig(Config): " service on the given port.") +def is_threepid_reserved(config, threepid): + """Check the threepid against the reserved threepid config + Args: + config(ServerConfig) - to access server config attributes + threepid(dict) - The threepid to test for + + Returns: + boolean Is the threepid undertest reserved_user + """ + + for tp in config.mau_limits_reserved_threepids: + if (threepid['medium'] == tp['medium'] + and threepid['address'] == tp['address']): + return True + return False + + def read_gc_thresholds(thresholds): """Reads the three integer thresholds for garbage collection. Ensures that the thresholds are integers if thresholds are supplied. 
diff --git a/synapse/rest/client/v1_only/register.py b/synapse/rest/client/v1_only/register.py index 95873e03d5..dadb376b02 100644 --- a/synapse/rest/client/v1_only/register.py +++ b/synapse/rest/client/v1_only/register.py @@ -23,6 +23,7 @@ from twisted.internet import defer import synapse.util.stringutils as stringutils from synapse.api.constants import LoginType from synapse.api.errors import Codes, SynapseError +from synapse.config.server import is_threepid_reserved from synapse.http.servlet import assert_params_in_dict, parse_json_object_from_request from synapse.rest.client.v1.base import ClientV1RestServlet from synapse.types import create_requester @@ -282,7 +283,7 @@ class RegisterRestServlet(ClientV1RestServlet): if "user" in register_json else None ) threepid = None - if session[LoginType.EMAIL_IDENTITY]: + if session.get(LoginType.EMAIL_IDENTITY): threepid = session["threepidCreds"] handler = self.handlers.registration_handler @@ -293,8 +294,8 @@ class RegisterRestServlet(ClientV1RestServlet): ) # Necessary due to auth checks prior to the threepid being # written to the db - if self.store.is_threepid_reserved(threepid): - self.store.upsert_monthly_active_user(registered_user_id) + if is_threepid_reserved(self.hs.config, threepid): + yield self.store.upsert_monthly_active_user(user_id) if session[LoginType.EMAIL_IDENTITY]: logger.debug("Binding emails %s to %s" % ( diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index f22b7577ea..2fb4d43ccb 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -26,6 +26,7 @@ import synapse import synapse.types from synapse.api.constants import LoginType from synapse.api.errors import Codes, SynapseError, UnrecognizedRequestError +from synapse.config.server import is_threepid_reserved from synapse.http.servlet import ( RestServlet, assert_params_in_dict, @@ -408,8 +409,8 @@ class RegisterRestServlet(RestServlet): ) # Necessary due to auth checks prior to the threepid being # written to the db - if self.store.is_threepid_reserved(threepid): - self.store.upsert_monthly_active_user(registered_user_id) + if is_threepid_reserved(self.hs.config, threepid): + yield self.store.upsert_monthly_active_user(registered_user_id) # remember that we've now registered that user account, and with # what user ID (since the user may not have specified) diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py index 173867c4b1..c7899d7fd2 100644 --- a/synapse/storage/monthly_active_users.py +++ b/synapse/storage/monthly_active_users.py @@ -219,17 +219,3 @@ class MonthlyActiveUsersStore(SQLBaseStore): yield self.upsert_monthly_active_user(user_id) elif now - last_seen_timestamp > LAST_SEEN_GRANULARITY: yield self.upsert_monthly_active_user(user_id) - - def is_threepid_reserved(self, threepid): - """Check the threepid against the reserved threepid config - Args: - threepid(dict) - The threepid to test for - Returns: - boolean Is the threepid undertest reserved_user - """ - for tp in self.hs.config.mau_limits_reserved_threepids: - if (threepid['medium'] == tp['medium'] - and threepid['address'] == tp['address']): - return True - else: - return False diff --git a/tests/utils.py b/tests/utils.py index 179b592501..63e30dc6c0 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -26,6 +26,7 @@ from twisted.internet import defer, reactor from synapse.api.constants import EventTypes from synapse.api.errors import CodeMessageException, cs_error 
+from synapse.config.server import ServerConfig
 from synapse.federation.transport import server
 from synapse.http.server import HttpServer
 from synapse.server import HomeServer
@@ -158,6 +159,11 @@ def setup_test_homeserver(
     # background, which upsets the test runner.
     config.update_user_directory = False
 
+    def is_threepid_reserved(threepid):
+        return ServerConfig.is_threepid_reserved(config, threepid)
+
+    config.is_threepid_reserved.side_effect = is_threepid_reserved
+
     config.use_frozen_dicts = True
     config.ldap_enabled = False
 

From 301cb60d0b2de55f7feac24a043b2624ad3c8733 Mon Sep 17 00:00:00 2001
From: Neil Johnson
Date: Fri, 31 Aug 2018 17:29:35 +0100
Subject: [PATCH 018/138] assert rather than warn

---
 synapse/api/auth.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index a89687f420..34382e4e3c 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -783,6 +783,7 @@ class Auth(object):
         Args:
             user_id(str|None): If present, checks for presence against existing
                 MAU cohort
+
             threepid(dict|None): If present, checks for presence against configured
                 reserved threepid. Used in cases where the user is trying register
                 with a MAU blocked server, normally they would be rejected but their
@@ -803,9 +804,7 @@ class Auth(object):
             limit_type=self.hs.config.hs_disabled_limit_type
         )
         if self.hs.config.limit_usage_by_mau is True:
-
-            if user_id and threepid:
-                logger.warn("Called with both user_id and threepid, this shoudn't happen")
+            assert not (user_id and threepid)
 
         # If the user is already part of the MAU cohort or a trial user
         if user_id:

From a395f1ddb3133134525ac5ae94ff03d94c95dfe6 Mon Sep 17 00:00:00 2001
From: Colin W
Date: Sun, 2 Sep 2018 17:30:07 -0500
Subject: [PATCH 019/138] Update readme on develop branch

---
 README.rst | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/README.rst b/README.rst
index 4c5971d043..46498a7ee0 100644
--- a/README.rst
+++ b/README.rst
@@ -747,6 +747,18 @@ so an example nginx configuration might look like::
       }
   }
 
+and an example apache configuration may look like::
+
+    <VirtualHost *:443>
+        SSLEngine on
+        ServerName matrix.example.com;
+
+        <Location /_matrix>
+            ProxyPass http://127.0.0.1:8008/_matrix/ nocanon
+            ProxyPassReverse http://127.0.0.1:8008/_matrix/
+        </Location>
+    </VirtualHost>
+
 You will also want to set ``bind_addresses: ['127.0.0.1']`` and
 ``x_forwarded: true`` for port 8008 in ``homeserver.yaml`` to ensure that
 client IP addresses are recorded correctly.
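
The ``bind_addresses`` and ``x_forwarded`` settings that the new section
refers to live in the listener block of ``homeserver.yaml``. An illustrative
listener, based on the shape of the default generated config (surrounding
options may differ in your file)::

    listeners:
      - port: 8008
        tls: false
        bind_addresses: ['127.0.0.1']
        type: http
        x_forwarded: true
        resources:
          - names: [client, federation]
            compress: false
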
From 81942c109db7a5c816a65d5931d7be7a6560b6d7 Mon Sep 17 00:00:00 2001
From: Colin W
Date: Sun, 2 Sep 2018 23:28:03 -0500
Subject: [PATCH 020/138] Remove end '/'s

---
 README.rst | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.rst b/README.rst
index 46498a7ee0..0fa39a416d 100644
--- a/README.rst
+++ b/README.rst
@@ -754,8 +754,8 @@ and an example apache configuration may look like::
         ServerName matrix.example.com;
 
         <Location /_matrix>
-            ProxyPass http://127.0.0.1:8008/_matrix/ nocanon
-            ProxyPassReverse http://127.0.0.1:8008/_matrix/
+            ProxyPass http://127.0.0.1:8008/_matrix nocanon
+            ProxyPassReverse http://127.0.0.1:8008/_matrix
         </Location>
     </VirtualHost>
 

From 905f8de6737c18a629de054c4be01fa70990eaae Mon Sep 17 00:00:00 2001
From: Amber Brown
Date: Mon, 3 Sep 2018 22:17:06 +1000
Subject: [PATCH 021/138] Create 3378.misc

---
 changelog.d/3378.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/3378.misc

diff --git a/changelog.d/3378.misc b/changelog.d/3378.misc
new file mode 100644
index 0000000000..8f88f88e69
--- /dev/null
+++ b/changelog.d/3378.misc
@@ -0,0 +1 @@
+Removed the link to the unmaintained matrix-synapse-auto-deploy project from the readme.

From 06ee2b7cc5e792acfade45217d6aaf1b56a201b8 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 3 Sep 2018 15:43:01 +0100
Subject: [PATCH 022/138] Newsfile

---
 changelog.d/3737.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/3737.misc

diff --git a/changelog.d/3737.misc b/changelog.d/3737.misc
new file mode 100644
index 0000000000..0361da4ebc
--- /dev/null
+++ b/changelog.d/3737.misc
@@ -0,0 +1 @@
+Remove redundant state resolution function

From 77055dba92af80cdfdffc30fd3084ecd24902c2e Mon Sep 17 00:00:00 2001
From: Amber Brown
Date: Tue, 4 Sep 2018 02:21:48 +1000
Subject: [PATCH 023/138] Fix tests on postgresql (#3740)

---
 .travis.yml                                   |   4 -
 changelog.d/3740.misc                         |   1 +
 tests/handlers/test_device.py                 | 143 +++++++++---------
 tests/replication/slave/storage/_base.py      |  95 ++++++------
 .../slave/storage/test_account_data.py        |  20 +--
 .../replication/slave/storage/test_events.py  | 110 +++++++-------
 .../slave/storage/test_receipts.py            |  15 +-
 tests/server.py                               |   1 +
 tests/storage/test_appservice.py              | 131 ++++++++--------
 tests/storage/test_directory.py               |   3 +-
 tests/storage/test_event_federation.py        |   6 +-
 tests/storage/test_monthly_active_users.py    | 136 +++++++++--------
 tests/storage/test_presence.py                |   7 +-
 tests/storage/test_profile.py                 |   2 +-
 tests/storage/test_user_directory.py          |   2 +-
 tests/test_visibility.py                      |   2 +-
 tests/unittest.py                             |   7 +-
 tests/utils.py                                |  10 +-
 18 files changed, 355 insertions(+), 340 deletions(-)
 create mode 100644 changelog.d/3740.misc

diff --git a/.travis.yml b/.travis.yml
index 318701c9f8..11c76db2e5 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -35,10 +35,6 @@ matrix:
     - python: 3.6
       env: TOX_ENV=check-newsfragment
 
-  allow_failures:
-    - python: 2.7
-      env: TOX_ENV=py27-postgres TRIAL_FLAGS="-j 4"
-
 install:
   - pip install tox

diff --git a/changelog.d/3740.misc b/changelog.d/3740.misc
new file mode 100644
index 0000000000..4dcb7fb5de
--- /dev/null
+++ b/changelog.d/3740.misc
@@ -0,0 +1 @@
+The test suite now passes on PostgreSQL.
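
The test changes below all follow the same shape: ``@defer.inlineCallbacks``
tests that ran against a real reactor become synchronous
``HomeserverTestCase`` methods that drive the fake reactor. Roughly, as a
sketch (``ExampleStoreTestCase`` is illustrative, not one of the tests in
this patch)::

    from tests.unittest import HomeserverTestCase

    class ExampleStoreTestCase(HomeserverTestCase):
        def make_homeserver(self, reactor, clock):
            # The homeserver is built against the fake reactor/clock.
            return self.setup_test_homeserver("server", http_client=None)

        def test_count_starts_at_zero(self):
            store = self.hs.get_datastore()
            # get_success() pumps the fake reactor until the Deferred
            # resolves, replacing `yield` under @defer.inlineCallbacks.
            count = self.get_success(store.get_monthly_active_count())
            self.assertEqual(count, 0)
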
diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index 56e7acd37c..a3aa0a1cf2 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,79 +14,79 @@ # See the License for the specific language governing permissions and # limitations under the License. -from twisted.internet import defer - import synapse.api.errors import synapse.handlers.device import synapse.storage -from tests import unittest, utils +from tests import unittest user1 = "@boris:aaa" user2 = "@theresa:bbb" -class DeviceTestCase(unittest.TestCase): - def __init__(self, *args, **kwargs): - super(DeviceTestCase, self).__init__(*args, **kwargs) - self.store = None # type: synapse.storage.DataStore - self.handler = None # type: synapse.handlers.device.DeviceHandler - self.clock = None # type: utils.MockClock - - @defer.inlineCallbacks - def setUp(self): - hs = yield utils.setup_test_homeserver(self.addCleanup) +class DeviceTestCase(unittest.HomeserverTestCase): + def make_homeserver(self, reactor, clock): + hs = self.setup_test_homeserver("server", http_client=None) self.handler = hs.get_device_handler() self.store = hs.get_datastore() - self.clock = hs.get_clock() + return hs + + def prepare(self, reactor, clock, hs): + # These tests assume that it starts 1000 seconds in. + self.reactor.advance(1000) - @defer.inlineCallbacks def test_device_is_created_if_doesnt_exist(self): - res = yield self.handler.check_device_registered( - user_id="@boris:foo", - device_id="fco", - initial_device_display_name="display name", + res = self.get_success( + self.handler.check_device_registered( + user_id="@boris:foo", + device_id="fco", + initial_device_display_name="display name", + ) ) self.assertEqual(res, "fco") - dev = yield self.handler.store.get_device("@boris:foo", "fco") + dev = self.get_success(self.handler.store.get_device("@boris:foo", "fco")) self.assertEqual(dev["display_name"], "display name") - @defer.inlineCallbacks def test_device_is_preserved_if_exists(self): - res1 = yield self.handler.check_device_registered( - user_id="@boris:foo", - device_id="fco", - initial_device_display_name="display name", + res1 = self.get_success( + self.handler.check_device_registered( + user_id="@boris:foo", + device_id="fco", + initial_device_display_name="display name", + ) ) self.assertEqual(res1, "fco") - res2 = yield self.handler.check_device_registered( - user_id="@boris:foo", - device_id="fco", - initial_device_display_name="new display name", + res2 = self.get_success( + self.handler.check_device_registered( + user_id="@boris:foo", + device_id="fco", + initial_device_display_name="new display name", + ) ) self.assertEqual(res2, "fco") - dev = yield self.handler.store.get_device("@boris:foo", "fco") + dev = self.get_success(self.handler.store.get_device("@boris:foo", "fco")) self.assertEqual(dev["display_name"], "display name") - @defer.inlineCallbacks def test_device_id_is_made_up_if_unspecified(self): - device_id = yield self.handler.check_device_registered( - user_id="@theresa:foo", - device_id=None, - initial_device_display_name="display", + device_id = self.get_success( + self.handler.check_device_registered( + user_id="@theresa:foo", + device_id=None, + initial_device_display_name="display", + ) ) - dev = yield 
self.handler.store.get_device("@theresa:foo", device_id) + dev = self.get_success(self.handler.store.get_device("@theresa:foo", device_id)) self.assertEqual(dev["display_name"], "display") - @defer.inlineCallbacks def test_get_devices_by_user(self): - yield self._record_users() + self._record_users() + + res = self.get_success(self.handler.get_devices_by_user(user1)) - res = yield self.handler.get_devices_by_user(user1) self.assertEqual(3, len(res)) device_map = {d["device_id"]: d for d in res} self.assertDictContainsSubset( @@ -119,11 +120,10 @@ class DeviceTestCase(unittest.TestCase): device_map["abc"], ) - @defer.inlineCallbacks def test_get_device(self): - yield self._record_users() + self._record_users() - res = yield self.handler.get_device(user1, "abc") + res = self.get_success(self.handler.get_device(user1, "abc")) self.assertDictContainsSubset( { "user_id": user1, @@ -135,59 +135,66 @@ class DeviceTestCase(unittest.TestCase): res, ) - @defer.inlineCallbacks def test_delete_device(self): - yield self._record_users() + self._record_users() # delete the device - yield self.handler.delete_device(user1, "abc") + self.get_success(self.handler.delete_device(user1, "abc")) # check the device was deleted - with self.assertRaises(synapse.api.errors.NotFoundError): - yield self.handler.get_device(user1, "abc") + res = self.handler.get_device(user1, "abc") + self.pump() + self.assertIsInstance( + self.failureResultOf(res).value, synapse.api.errors.NotFoundError + ) # we'd like to check the access token was invalidated, but that's a # bit of a PITA. - @defer.inlineCallbacks def test_update_device(self): - yield self._record_users() + self._record_users() update = {"display_name": "new display"} - yield self.handler.update_device(user1, "abc", update) + self.get_success(self.handler.update_device(user1, "abc", update)) - res = yield self.handler.get_device(user1, "abc") + res = self.get_success(self.handler.get_device(user1, "abc")) self.assertEqual(res["display_name"], "new display") - @defer.inlineCallbacks def test_update_unknown_device(self): update = {"display_name": "new_display"} - with self.assertRaises(synapse.api.errors.NotFoundError): - yield self.handler.update_device("user_id", "unknown_device_id", update) + res = self.handler.update_device("user_id", "unknown_device_id", update) + self.pump() + self.assertIsInstance( + self.failureResultOf(res).value, synapse.api.errors.NotFoundError + ) - @defer.inlineCallbacks def _record_users(self): # check this works for both devices which have a recorded client_ip, # and those which don't. 
- yield self._record_user(user1, "xyz", "display 0") - yield self._record_user(user1, "fco", "display 1", "token1", "ip1") - yield self._record_user(user1, "abc", "display 2", "token2", "ip2") - yield self._record_user(user1, "abc", "display 2", "token3", "ip3") + self._record_user(user1, "xyz", "display 0") + self._record_user(user1, "fco", "display 1", "token1", "ip1") + self._record_user(user1, "abc", "display 2", "token2", "ip2") + self._record_user(user1, "abc", "display 2", "token3", "ip3") - yield self._record_user(user2, "def", "dispkay", "token4", "ip4") + self._record_user(user2, "def", "dispkay", "token4", "ip4") + + self.reactor.advance(10000) - @defer.inlineCallbacks def _record_user( self, user_id, device_id, display_name, access_token=None, ip=None ): - device_id = yield self.handler.check_device_registered( - user_id=user_id, - device_id=device_id, - initial_device_display_name=display_name, + device_id = self.get_success( + self.handler.check_device_registered( + user_id=user_id, + device_id=device_id, + initial_device_display_name=display_name, + ) ) if ip is not None: - yield self.store.insert_client_ip( - user_id, access_token, ip, "user_agent", device_id + self.get_success( + self.store.insert_client_ip( + user_id, access_token, ip, "user_agent", device_id + ) ) - self.clock.advance_time(1000) + self.reactor.advance(1000) diff --git a/tests/replication/slave/storage/_base.py b/tests/replication/slave/storage/_base.py index 65df116efc..089cecfbee 100644 --- a/tests/replication/slave/storage/_base.py +++ b/tests/replication/slave/storage/_base.py @@ -1,4 +1,5 @@ # Copyright 2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,89 +12,91 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import tempfile from mock import Mock, NonCallableMock -from twisted.internet import defer, reactor -from twisted.internet.defer import Deferred +import attr from synapse.replication.tcp.client import ( ReplicationClientFactory, ReplicationClientHandler, ) from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory -from synapse.util.logcontext import PreserveLoggingContext, make_deferred_yieldable from tests import unittest -from tests.utils import setup_test_homeserver -class TestReplicationClientHandler(ReplicationClientHandler): - """Overrides on_rdata so that we can wait for it to happen""" +class BaseSlavedStoreTestCase(unittest.HomeserverTestCase): + def make_homeserver(self, reactor, clock): - def __init__(self, store): - super(TestReplicationClientHandler, self).__init__(store) - self._rdata_awaiters = [] - - def await_replication(self): - d = Deferred() - self._rdata_awaiters.append(d) - return make_deferred_yieldable(d) - - def on_rdata(self, stream_name, token, rows): - awaiters = self._rdata_awaiters - self._rdata_awaiters = [] - super(TestReplicationClientHandler, self).on_rdata(stream_name, token, rows) - with PreserveLoggingContext(): - for a in awaiters: - a.callback(None) - - -class BaseSlavedStoreTestCase(unittest.TestCase): - @defer.inlineCallbacks - def setUp(self): - self.hs = yield setup_test_homeserver( - self.addCleanup, + hs = self.setup_test_homeserver( "blue", - http_client=None, federation_client=Mock(), ratelimiter=NonCallableMock(spec_set=["send_message"]), ) - self.hs.get_ratelimiter().send_message.return_value = (True, 0) + + hs.get_ratelimiter().send_message.return_value = (True, 0) + + return hs + + def prepare(self, reactor, clock, hs): self.master_store = self.hs.get_datastore() self.slaved_store = self.STORE_TYPE(self.hs.get_db_conn(), self.hs) self.event_id = 0 server_factory = ReplicationStreamProtocolFactory(self.hs) - # XXX: mktemp is unsafe and should never be used. but we're just a test. - path = tempfile.mktemp(prefix="base_slaved_store_test_case_socket") - listener = reactor.listenUNIX(path, server_factory) - self.addCleanup(listener.stopListening) self.streamer = server_factory.streamer - self.replication_handler = TestReplicationClientHandler(self.slaved_store) + self.replication_handler = ReplicationClientHandler(self.slaved_store) client_factory = ReplicationClientFactory( self.hs, "client_name", self.replication_handler ) - client_connector = reactor.connectUNIX(path, client_factory) - self.addCleanup(client_factory.stopTrying) - self.addCleanup(client_connector.disconnect) + + server = server_factory.buildProtocol(None) + client = client_factory.buildProtocol(None) + + @attr.s + class FakeTransport(object): + + other = attr.ib() + disconnecting = False + buffer = attr.ib(default=b'') + + def registerProducer(self, producer, streaming): + + self.producer = producer + + def _produce(): + self.producer.resumeProducing() + reactor.callLater(0.1, _produce) + + reactor.callLater(0.0, _produce) + + def write(self, byt): + self.buffer = self.buffer + byt + + if getattr(self.other, "transport") is not None: + self.other.dataReceived(self.buffer) + self.buffer = b"" + + def writeSequence(self, seq): + for x in seq: + self.write(x) + + client.makeConnection(FakeTransport(server)) + server.makeConnection(FakeTransport(client)) def replicate(self): """Tell the master side of replication that something has happened, and then wait for the replication to occur. """ - # xxx: should we be more specific in what we wait for? 
- d = self.replication_handler.await_replication() self.streamer.on_notifier_poke() - return d + self.pump(0.1) - @defer.inlineCallbacks def check(self, method, args, expected_result=None): - master_result = yield getattr(self.master_store, method)(*args) - slaved_result = yield getattr(self.slaved_store, method)(*args) + master_result = self.get_success(getattr(self.master_store, method)(*args)) + slaved_result = self.get_success(getattr(self.slaved_store, method)(*args)) if expected_result is not None: self.assertEqual(master_result, expected_result) self.assertEqual(slaved_result, expected_result) diff --git a/tests/replication/slave/storage/test_account_data.py b/tests/replication/slave/storage/test_account_data.py index 87cc2b2fba..43e3248703 100644 --- a/tests/replication/slave/storage/test_account_data.py +++ b/tests/replication/slave/storage/test_account_data.py @@ -12,9 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. - -from twisted.internet import defer - from synapse.replication.slave.storage.account_data import SlavedAccountDataStore from ._base import BaseSlavedStoreTestCase @@ -27,16 +24,19 @@ class SlavedAccountDataStoreTestCase(BaseSlavedStoreTestCase): STORE_TYPE = SlavedAccountDataStore - @defer.inlineCallbacks def test_user_account_data(self): - yield self.master_store.add_account_data_for_user(USER_ID, TYPE, {"a": 1}) - yield self.replicate() - yield self.check( + self.get_success( + self.master_store.add_account_data_for_user(USER_ID, TYPE, {"a": 1}) + ) + self.replicate() + self.check( "get_global_account_data_by_type_for_user", [TYPE, USER_ID], {"a": 1} ) - yield self.master_store.add_account_data_for_user(USER_ID, TYPE, {"a": 2}) - yield self.replicate() - yield self.check( + self.get_success( + self.master_store.add_account_data_for_user(USER_ID, TYPE, {"a": 2}) + ) + self.replicate() + self.check( "get_global_account_data_by_type_for_user", [TYPE, USER_ID], {"a": 2} ) diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py index 2ba80ccdcf..db44d33c68 100644 --- a/tests/replication/slave/storage/test_events.py +++ b/tests/replication/slave/storage/test_events.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from twisted.internet import defer - from synapse.events import FrozenEvent, _EventInternalMetadata from synapse.events.snapshot import EventContext from synapse.replication.slave.storage.events import SlavedEventStore @@ -55,70 +53,66 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase): def tearDown(self): [unpatch() for unpatch in self.unpatches] - @defer.inlineCallbacks def test_get_latest_event_ids_in_room(self): - create = yield self.persist(type="m.room.create", key="", creator=USER_ID) - yield self.replicate() - yield self.check("get_latest_event_ids_in_room", (ROOM_ID,), [create.event_id]) + create = self.persist(type="m.room.create", key="", creator=USER_ID) + self.replicate() + self.check("get_latest_event_ids_in_room", (ROOM_ID,), [create.event_id]) - join = yield self.persist( + join = self.persist( type="m.room.member", key=USER_ID, membership="join", prev_events=[(create.event_id, {})], ) - yield self.replicate() - yield self.check("get_latest_event_ids_in_room", (ROOM_ID,), [join.event_id]) + self.replicate() + self.check("get_latest_event_ids_in_room", (ROOM_ID,), [join.event_id]) - @defer.inlineCallbacks def test_redactions(self): - yield self.persist(type="m.room.create", key="", creator=USER_ID) - yield self.persist(type="m.room.member", key=USER_ID, membership="join") + self.persist(type="m.room.create", key="", creator=USER_ID) + self.persist(type="m.room.member", key=USER_ID, membership="join") - msg = yield self.persist(type="m.room.message", msgtype="m.text", body="Hello") - yield self.replicate() - yield self.check("get_event", [msg.event_id], msg) + msg = self.persist(type="m.room.message", msgtype="m.text", body="Hello") + self.replicate() + self.check("get_event", [msg.event_id], msg) - redaction = yield self.persist(type="m.room.redaction", redacts=msg.event_id) - yield self.replicate() + redaction = self.persist(type="m.room.redaction", redacts=msg.event_id) + self.replicate() msg_dict = msg.get_dict() msg_dict["content"] = {} msg_dict["unsigned"]["redacted_by"] = redaction.event_id msg_dict["unsigned"]["redacted_because"] = redaction redacted = FrozenEvent(msg_dict, msg.internal_metadata.get_dict()) - yield self.check("get_event", [msg.event_id], redacted) + self.check("get_event", [msg.event_id], redacted) - @defer.inlineCallbacks def test_backfilled_redactions(self): - yield self.persist(type="m.room.create", key="", creator=USER_ID) - yield self.persist(type="m.room.member", key=USER_ID, membership="join") + self.persist(type="m.room.create", key="", creator=USER_ID) + self.persist(type="m.room.member", key=USER_ID, membership="join") - msg = yield self.persist(type="m.room.message", msgtype="m.text", body="Hello") - yield self.replicate() - yield self.check("get_event", [msg.event_id], msg) + msg = self.persist(type="m.room.message", msgtype="m.text", body="Hello") + self.replicate() + self.check("get_event", [msg.event_id], msg) - redaction = yield self.persist( + redaction = self.persist( type="m.room.redaction", redacts=msg.event_id, backfill=True ) - yield self.replicate() + self.replicate() msg_dict = msg.get_dict() msg_dict["content"] = {} msg_dict["unsigned"]["redacted_by"] = redaction.event_id msg_dict["unsigned"]["redacted_because"] = redaction redacted = FrozenEvent(msg_dict, msg.internal_metadata.get_dict()) - yield self.check("get_event", [msg.event_id], redacted) + self.check("get_event", [msg.event_id], redacted) - @defer.inlineCallbacks def test_invites(self): - yield self.persist(type="m.room.create", key="", creator=USER_ID) - 
yield self.check("get_invited_rooms_for_user", [USER_ID_2], []) - event = yield self.persist( - type="m.room.member", key=USER_ID_2, membership="invite" - ) - yield self.replicate() - yield self.check( + self.persist(type="m.room.create", key="", creator=USER_ID) + self.check("get_invited_rooms_for_user", [USER_ID_2], []) + event = self.persist(type="m.room.member", key=USER_ID_2, membership="invite") + + self.replicate() + + self.check( "get_invited_rooms_for_user", [USER_ID_2], [ @@ -132,37 +126,34 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase): ], ) - @defer.inlineCallbacks def test_push_actions_for_user(self): - yield self.persist(type="m.room.create", key="", creator=USER_ID) - yield self.persist(type="m.room.join", key=USER_ID, membership="join") - yield self.persist( + self.persist(type="m.room.create", key="", creator=USER_ID) + self.persist(type="m.room.join", key=USER_ID, membership="join") + self.persist( type="m.room.join", sender=USER_ID, key=USER_ID_2, membership="join" ) - event1 = yield self.persist( - type="m.room.message", msgtype="m.text", body="hello" - ) - yield self.replicate() - yield self.check( + event1 = self.persist(type="m.room.message", msgtype="m.text", body="hello") + self.replicate() + self.check( "get_unread_event_push_actions_by_room_for_user", [ROOM_ID, USER_ID_2, event1.event_id], {"highlight_count": 0, "notify_count": 0}, ) - yield self.persist( + self.persist( type="m.room.message", msgtype="m.text", body="world", push_actions=[(USER_ID_2, ["notify"])], ) - yield self.replicate() - yield self.check( + self.replicate() + self.check( "get_unread_event_push_actions_by_room_for_user", [ROOM_ID, USER_ID_2, event1.event_id], {"highlight_count": 0, "notify_count": 1}, ) - yield self.persist( + self.persist( type="m.room.message", msgtype="m.text", body="world", @@ -170,8 +161,8 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase): (USER_ID_2, ["notify", {"set_tweak": "highlight", "value": True}]) ], ) - yield self.replicate() - yield self.check( + self.replicate() + self.check( "get_unread_event_push_actions_by_room_for_user", [ROOM_ID, USER_ID_2, event1.event_id], {"highlight_count": 1, "notify_count": 2}, @@ -179,7 +170,6 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase): event_id = 0 - @defer.inlineCallbacks def persist( self, sender=USER_ID, @@ -206,8 +196,8 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase): depth = self.event_id if not prev_events: - latest_event_ids = yield self.master_store.get_latest_event_ids_in_room( - room_id + latest_event_ids = self.get_success( + self.master_store.get_latest_event_ids_in_room(room_id) ) prev_events = [(ev_id, {}) for ev_id in latest_event_ids] @@ -240,19 +230,23 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase): ) else: state_handler = self.hs.get_state_handler() - context = yield state_handler.compute_event_context(event) + context = self.get_success(state_handler.compute_event_context(event)) - yield self.master_store.add_push_actions_to_staging( + self.master_store.add_push_actions_to_staging( event.event_id, {user_id: actions for user_id, actions in push_actions} ) ordering = None if backfill: - yield self.master_store.persist_events([(event, context)], backfilled=True) + self.get_success( + self.master_store.persist_events([(event, context)], backfilled=True) + ) else: - ordering, _ = yield self.master_store.persist_event(event, context) + ordering, _ = self.get_success( + self.master_store.persist_event(event, context) + ) if ordering: 
event.internal_metadata.stream_ordering = ordering - defer.returnValue(event) + return event diff --git a/tests/replication/slave/storage/test_receipts.py b/tests/replication/slave/storage/test_receipts.py index ae1adeded1..f47d94f690 100644 --- a/tests/replication/slave/storage/test_receipts.py +++ b/tests/replication/slave/storage/test_receipts.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from twisted.internet import defer - from synapse.replication.slave.storage.receipts import SlavedReceiptsStore from ._base import BaseSlavedStoreTestCase @@ -27,13 +25,10 @@ class SlavedReceiptTestCase(BaseSlavedStoreTestCase): STORE_TYPE = SlavedReceiptsStore - @defer.inlineCallbacks def test_receipt(self): - yield self.check("get_receipts_for_user", [USER_ID, "m.read"], {}) - yield self.master_store.insert_receipt( - ROOM_ID, "m.read", USER_ID, [EVENT_ID], {} - ) - yield self.replicate() - yield self.check( - "get_receipts_for_user", [USER_ID, "m.read"], {ROOM_ID: EVENT_ID} + self.check("get_receipts_for_user", [USER_ID, "m.read"], {}) + self.get_success( + self.master_store.insert_receipt(ROOM_ID, "m.read", USER_ID, [EVENT_ID], {}) ) + self.replicate() + self.check("get_receipts_for_user", [USER_ID, "m.read"], {ROOM_ID: EVENT_ID}) diff --git a/tests/server.py b/tests/server.py index 7dbdb7f8ea..615bba1b59 100644 --- a/tests/server.py +++ b/tests/server.py @@ -232,6 +232,7 @@ def setup_test_homeserver(cleanup_func, *args, **kwargs): clock.threadpool = ThreadPool() pool.threadpool = ThreadPool() + pool.running = True return d diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index c893990454..3f0083831b 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -37,18 +37,14 @@ class ApplicationServiceStoreTestCase(unittest.TestCase): @defer.inlineCallbacks def setUp(self): self.as_yaml_files = [] - config = Mock( - app_service_config_files=self.as_yaml_files, - event_cache_size=1, - password_providers=[], - ) hs = yield setup_test_homeserver( - self.addCleanup, - config=config, - federation_sender=Mock(), - federation_client=Mock(), + self.addCleanup, federation_sender=Mock(), federation_client=Mock() ) + hs.config.app_service_config_files = self.as_yaml_files + hs.config.event_cache_size = 1 + hs.config.password_providers = [] + self.as_token = "token1" self.as_url = "some_url" self.as_id = "as1" @@ -58,7 +54,7 @@ class ApplicationServiceStoreTestCase(unittest.TestCase): self._add_appservice("token2", "as2", "some_url", "some_hs_token", "bob") self._add_appservice("token3", "as3", "some_url", "some_hs_token", "bob") # must be done after inserts - self.store = ApplicationServiceStore(None, hs) + self.store = ApplicationServiceStore(hs.get_db_conn(), hs) def tearDown(self): # TODO: suboptimal that we need to create files for tests! 
@@ -105,18 +101,16 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): def setUp(self): self.as_yaml_files = [] - config = Mock( - app_service_config_files=self.as_yaml_files, - event_cache_size=1, - password_providers=[], - ) hs = yield setup_test_homeserver( - self.addCleanup, - config=config, - federation_sender=Mock(), - federation_client=Mock(), + self.addCleanup, federation_sender=Mock(), federation_client=Mock() ) + + hs.config.app_service_config_files = self.as_yaml_files + hs.config.event_cache_size = 1 + hs.config.password_providers = [] + self.db_pool = hs.get_db_pool() + self.engine = hs.database_engine self.as_list = [ {"token": "token1", "url": "https://matrix-as.org", "id": "id_1"}, @@ -129,7 +123,7 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): self.as_yaml_files = [] - self.store = TestTransactionStore(None, hs) + self.store = TestTransactionStore(hs.get_db_conn(), hs) def _add_service(self, url, as_token, id): as_yaml = dict( @@ -146,29 +140,35 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): self.as_yaml_files.append(as_token) def _set_state(self, id, state, txn=None): - return self.db_pool.runQuery( - "INSERT INTO application_services_state(as_id, state, last_txn) " - "VALUES(?,?,?)", + return self.db_pool.runOperation( + self.engine.convert_param_style( + "INSERT INTO application_services_state(as_id, state, last_txn) " + "VALUES(?,?,?)" + ), (id, state, txn), ) def _insert_txn(self, as_id, txn_id, events): - return self.db_pool.runQuery( - "INSERT INTO application_services_txns(as_id, txn_id, event_ids) " - "VALUES(?,?,?)", + return self.db_pool.runOperation( + self.engine.convert_param_style( + "INSERT INTO application_services_txns(as_id, txn_id, event_ids) " + "VALUES(?,?,?)" + ), (as_id, txn_id, json.dumps([e.event_id for e in events])), ) def _set_last_txn(self, as_id, txn_id): - return self.db_pool.runQuery( - "INSERT INTO application_services_state(as_id, last_txn, state) " - "VALUES(?,?,?)", + return self.db_pool.runOperation( + self.engine.convert_param_style( + "INSERT INTO application_services_state(as_id, last_txn, state) " + "VALUES(?,?,?)" + ), (as_id, txn_id, ApplicationServiceState.UP), ) @defer.inlineCallbacks def test_get_appservice_state_none(self): - service = Mock(id=999) + service = Mock(id="999") state = yield self.store.get_appservice_state(service) self.assertEquals(None, state) @@ -200,7 +200,9 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): service = Mock(id=self.as_list[1]["id"]) yield self.store.set_appservice_state(service, ApplicationServiceState.DOWN) rows = yield self.db_pool.runQuery( - "SELECT as_id FROM application_services_state WHERE state=?", + self.engine.convert_param_style( + "SELECT as_id FROM application_services_state WHERE state=?" + ), (ApplicationServiceState.DOWN,), ) self.assertEquals(service.id, rows[0][0]) @@ -212,7 +214,9 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): yield self.store.set_appservice_state(service, ApplicationServiceState.DOWN) yield self.store.set_appservice_state(service, ApplicationServiceState.UP) rows = yield self.db_pool.runQuery( - "SELECT as_id FROM application_services_state WHERE state=?", + self.engine.convert_param_style( + "SELECT as_id FROM application_services_state WHERE state=?" 
+ ), (ApplicationServiceState.UP,), ) self.assertEquals(service.id, rows[0][0]) @@ -279,14 +283,19 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): yield self.store.complete_appservice_txn(txn_id=txn_id, service=service) res = yield self.db_pool.runQuery( - "SELECT last_txn FROM application_services_state WHERE as_id=?", + self.engine.convert_param_style( + "SELECT last_txn FROM application_services_state WHERE as_id=?" + ), (service.id,), ) self.assertEquals(1, len(res)) self.assertEquals(txn_id, res[0][0]) res = yield self.db_pool.runQuery( - "SELECT * FROM application_services_txns WHERE txn_id=?", (txn_id,) + self.engine.convert_param_style( + "SELECT * FROM application_services_txns WHERE txn_id=?" + ), + (txn_id,), ) self.assertEquals(0, len(res)) @@ -300,7 +309,9 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): yield self.store.complete_appservice_txn(txn_id=txn_id, service=service) res = yield self.db_pool.runQuery( - "SELECT last_txn, state FROM application_services_state WHERE " "as_id=?", + self.engine.convert_param_style( + "SELECT last_txn, state FROM application_services_state WHERE as_id=?" + ), (service.id,), ) self.assertEquals(1, len(res)) @@ -308,7 +319,10 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): self.assertEquals(ApplicationServiceState.UP, res[0][1]) res = yield self.db_pool.runQuery( - "SELECT * FROM application_services_txns WHERE txn_id=?", (txn_id,) + self.engine.convert_param_style( + "SELECT * FROM application_services_txns WHERE txn_id=?" + ), + (txn_id,), ) self.assertEquals(0, len(res)) @@ -394,37 +408,31 @@ class ApplicationServiceStoreConfigTestCase(unittest.TestCase): f1 = self._write_config(suffix="1") f2 = self._write_config(suffix="2") - config = Mock( - app_service_config_files=[f1, f2], event_cache_size=1, password_providers=[] - ) hs = yield setup_test_homeserver( - self.addCleanup, - config=config, - datastore=Mock(), - federation_sender=Mock(), - federation_client=Mock(), + self.addCleanup, federation_sender=Mock(), federation_client=Mock() ) - ApplicationServiceStore(None, hs) + hs.config.app_service_config_files = [f1, f2] + hs.config.event_cache_size = 1 + hs.config.password_providers = [] + + ApplicationServiceStore(hs.get_db_conn(), hs) @defer.inlineCallbacks def test_duplicate_ids(self): f1 = self._write_config(id="id", suffix="1") f2 = self._write_config(id="id", suffix="2") - config = Mock( - app_service_config_files=[f1, f2], event_cache_size=1, password_providers=[] - ) hs = yield setup_test_homeserver( - self.addCleanup, - config=config, - datastore=Mock(), - federation_sender=Mock(), - federation_client=Mock(), + self.addCleanup, federation_sender=Mock(), federation_client=Mock() ) + hs.config.app_service_config_files = [f1, f2] + hs.config.event_cache_size = 1 + hs.config.password_providers = [] + with self.assertRaises(ConfigError) as cm: - ApplicationServiceStore(None, hs) + ApplicationServiceStore(hs.get_db_conn(), hs) e = cm.exception self.assertIn(f1, str(e)) @@ -436,19 +444,16 @@ class ApplicationServiceStoreConfigTestCase(unittest.TestCase): f1 = self._write_config(as_token="as_token", suffix="1") f2 = self._write_config(as_token="as_token", suffix="2") - config = Mock( - app_service_config_files=[f1, f2], event_cache_size=1, password_providers=[] - ) hs = yield setup_test_homeserver( - self.addCleanup, - config=config, - datastore=Mock(), - federation_sender=Mock(), - federation_client=Mock(), + self.addCleanup, federation_sender=Mock(), 
federation_client=Mock() ) + hs.config.app_service_config_files = [f1, f2] + hs.config.event_cache_size = 1 + hs.config.password_providers = [] + with self.assertRaises(ConfigError) as cm: - ApplicationServiceStore(None, hs) + ApplicationServiceStore(hs.get_db_conn(), hs) e = cm.exception self.assertIn(f1, str(e)) diff --git a/tests/storage/test_directory.py b/tests/storage/test_directory.py index b4510c1c8d..4e128e1047 100644 --- a/tests/storage/test_directory.py +++ b/tests/storage/test_directory.py @@ -16,7 +16,6 @@ from twisted.internet import defer -from synapse.storage.directory import DirectoryStore from synapse.types import RoomAlias, RoomID from tests import unittest @@ -28,7 +27,7 @@ class DirectoryStoreTestCase(unittest.TestCase): def setUp(self): hs = yield setup_test_homeserver(self.addCleanup) - self.store = DirectoryStore(None, hs) + self.store = hs.get_datastore() self.room = RoomID.from_string("!abcde:test") self.alias = RoomAlias.from_string("#my-room:test") diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py index 2fdf34fdf6..0d4e74d637 100644 --- a/tests/storage/test_event_federation.py +++ b/tests/storage/test_event_federation.py @@ -37,10 +37,10 @@ class EventFederationWorkerStoreTestCase(tests.unittest.TestCase): ( "INSERT INTO events (" " room_id, event_id, type, depth, topological_ordering," - " content, processed, outlier) " - "VALUES (?, ?, 'm.test', ?, ?, 'test', ?, ?)" + " content, processed, outlier, stream_ordering) " + "VALUES (?, ?, 'm.test', ?, ?, 'test', ?, ?, ?)" ), - (room_id, event_id, i, i, True, False), + (room_id, event_id, i, i, True, False, i), ) txn.execute( diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py index f2ed866ae7..2036287288 100644 --- a/tests/storage/test_monthly_active_users.py +++ b/tests/storage/test_monthly_active_users.py @@ -13,25 +13,22 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from twisted.internet import defer - -import tests.unittest -import tests.utils -from tests.utils import setup_test_homeserver +from tests.unittest import HomeserverTestCase FORTY_DAYS = 40 * 24 * 60 * 60 -class MonthlyActiveUsersTestCase(tests.unittest.TestCase): - def __init__(self, *args, **kwargs): - super(MonthlyActiveUsersTestCase, self).__init__(*args, **kwargs) +class MonthlyActiveUsersTestCase(HomeserverTestCase): + def make_homeserver(self, reactor, clock): - @defer.inlineCallbacks - def setUp(self): - self.hs = yield setup_test_homeserver(self.addCleanup) - self.store = self.hs.get_datastore() + hs = self.setup_test_homeserver() + self.store = hs.get_datastore() + + # Advance the clock a bit + reactor.advance(FORTY_DAYS) + + return hs - @defer.inlineCallbacks def test_initialise_reserved_users(self): self.hs.config.max_mau_value = 5 user1 = "@user1:server" @@ -44,88 +41,101 @@ class MonthlyActiveUsersTestCase(tests.unittest.TestCase): ] user_num = len(threepids) - yield self.store.register(user_id=user1, token="123", password_hash=None) - - yield self.store.register(user_id=user2, token="456", password_hash=None) + self.store.register(user_id=user1, token="123", password_hash=None) + self.store.register(user_id=user2, token="456", password_hash=None) + self.pump() now = int(self.hs.get_clock().time_msec()) - yield self.store.user_add_threepid(user1, "email", user1_email, now, now) - yield self.store.user_add_threepid(user2, "email", user2_email, now, now) - yield self.store.initialise_reserved_users(threepids) + self.store.user_add_threepid(user1, "email", user1_email, now, now) + self.store.user_add_threepid(user2, "email", user2_email, now, now) + self.store.initialise_reserved_users(threepids) + self.pump() - active_count = yield self.store.get_monthly_active_count() + active_count = self.store.get_monthly_active_count() # Test total counts - self.assertEquals(active_count, user_num) + self.assertEquals(self.get_success(active_count), user_num) # Test user is marked as active - - timestamp = yield self.store.user_last_seen_monthly_active(user1) - self.assertTrue(timestamp) - timestamp = yield self.store.user_last_seen_monthly_active(user2) - self.assertTrue(timestamp) + timestamp = self.store.user_last_seen_monthly_active(user1) + self.assertTrue(self.get_success(timestamp)) + timestamp = self.store.user_last_seen_monthly_active(user2) + self.assertTrue(self.get_success(timestamp)) # Test that users are never removed from the db. 
self.hs.config.max_mau_value = 0 - self.hs.get_clock().advance_time(FORTY_DAYS) + self.reactor.advance(FORTY_DAYS) - yield self.store.reap_monthly_active_users() + self.store.reap_monthly_active_users() + self.pump() - active_count = yield self.store.get_monthly_active_count() - self.assertEquals(active_count, user_num) + active_count = self.store.get_monthly_active_count() + self.assertEquals(self.get_success(active_count), user_num) # Test that regalar users are removed from the db ru_count = 2 - yield self.store.upsert_monthly_active_user("@ru1:server") - yield self.store.upsert_monthly_active_user("@ru2:server") - active_count = yield self.store.get_monthly_active_count() + self.store.upsert_monthly_active_user("@ru1:server") + self.store.upsert_monthly_active_user("@ru2:server") + self.pump() - self.assertEqual(active_count, user_num + ru_count) + active_count = self.store.get_monthly_active_count() + self.assertEqual(self.get_success(active_count), user_num + ru_count) self.hs.config.max_mau_value = user_num - yield self.store.reap_monthly_active_users() + self.store.reap_monthly_active_users() + self.pump() - active_count = yield self.store.get_monthly_active_count() - self.assertEquals(active_count, user_num) + active_count = self.store.get_monthly_active_count() + self.assertEquals(self.get_success(active_count), user_num) - @defer.inlineCallbacks def test_can_insert_and_count_mau(self): - count = yield self.store.get_monthly_active_count() - self.assertEqual(0, count) + count = self.store.get_monthly_active_count() + self.assertEqual(0, self.get_success(count)) - yield self.store.upsert_monthly_active_user("@user:server") - count = yield self.store.get_monthly_active_count() + self.store.upsert_monthly_active_user("@user:server") + self.pump() - self.assertEqual(1, count) + count = self.store.get_monthly_active_count() + self.assertEqual(1, self.get_success(count)) - @defer.inlineCallbacks def test_user_last_seen_monthly_active(self): user_id1 = "@user1:server" user_id2 = "@user2:server" user_id3 = "@user3:server" - result = yield self.store.user_last_seen_monthly_active(user_id1) - self.assertFalse(result == 0) - yield self.store.upsert_monthly_active_user(user_id1) - yield self.store.upsert_monthly_active_user(user_id2) - result = yield self.store.user_last_seen_monthly_active(user_id1) - self.assertTrue(result > 0) - result = yield self.store.user_last_seen_monthly_active(user_id3) - self.assertFalse(result == 0) + result = self.store.user_last_seen_monthly_active(user_id1) + self.assertFalse(self.get_success(result) == 0) + + self.store.upsert_monthly_active_user(user_id1) + self.store.upsert_monthly_active_user(user_id2) + self.pump() + + result = self.store.user_last_seen_monthly_active(user_id1) + self.assertGreater(self.get_success(result), 0) + + result = self.store.user_last_seen_monthly_active(user_id3) + self.assertNotEqual(self.get_success(result), 0) - @defer.inlineCallbacks def test_reap_monthly_active_users(self): self.hs.config.max_mau_value = 5 initial_users = 10 for i in range(initial_users): - yield self.store.upsert_monthly_active_user("@user%d:server" % i) - count = yield self.store.get_monthly_active_count() - self.assertTrue(count, initial_users) - yield self.store.reap_monthly_active_users() - count = yield self.store.get_monthly_active_count() - self.assertEquals(count, initial_users - self.hs.config.max_mau_value) + self.store.upsert_monthly_active_user("@user%d:server" % i) + self.pump() - self.hs.get_clock().advance_time(FORTY_DAYS) - yield 
self.store.reap_monthly_active_users() - count = yield self.store.get_monthly_active_count() - self.assertEquals(count, 0) + count = self.store.get_monthly_active_count() + self.assertTrue(self.get_success(count), initial_users) + + self.store.reap_monthly_active_users() + self.pump() + count = self.store.get_monthly_active_count() + self.assertEquals( + self.get_success(count), initial_users - self.hs.config.max_mau_value + ) + + self.reactor.advance(FORTY_DAYS) + self.store.reap_monthly_active_users() + self.pump() + + count = self.store.get_monthly_active_count() + self.assertEquals(self.get_success(count), 0) diff --git a/tests/storage/test_presence.py b/tests/storage/test_presence.py index b5b58ff660..c7a63f39b9 100644 --- a/tests/storage/test_presence.py +++ b/tests/storage/test_presence.py @@ -16,19 +16,18 @@ from twisted.internet import defer -from synapse.storage.presence import PresenceStore from synapse.types import UserID from tests import unittest -from tests.utils import MockClock, setup_test_homeserver +from tests.utils import setup_test_homeserver class PresenceStoreTestCase(unittest.TestCase): @defer.inlineCallbacks def setUp(self): - hs = yield setup_test_homeserver(self.addCleanup, clock=MockClock()) + hs = yield setup_test_homeserver(self.addCleanup) - self.store = PresenceStore(None, hs) + self.store = hs.get_datastore() self.u_apple = UserID.from_string("@apple:test") self.u_banana = UserID.from_string("@banana:test") diff --git a/tests/storage/test_profile.py b/tests/storage/test_profile.py index a1f6618bf9..45824bd3b2 100644 --- a/tests/storage/test_profile.py +++ b/tests/storage/test_profile.py @@ -28,7 +28,7 @@ class ProfileStoreTestCase(unittest.TestCase): def setUp(self): hs = yield setup_test_homeserver(self.addCleanup) - self.store = ProfileStore(None, hs) + self.store = ProfileStore(hs.get_db_conn(), hs) self.u_frank = UserID.from_string("@frank:test") diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py index b46e0ea7e2..0dde1ab2fe 100644 --- a/tests/storage/test_user_directory.py +++ b/tests/storage/test_user_directory.py @@ -30,7 +30,7 @@ class UserDirectoryStoreTestCase(unittest.TestCase): @defer.inlineCallbacks def setUp(self): self.hs = yield setup_test_homeserver(self.addCleanup) - self.store = UserDirectoryStore(None, self.hs) + self.store = UserDirectoryStore(self.hs.get_db_conn(), self.hs) # alice and bob are both in !room_id. bobby is not but shares # a homeserver with alice. diff --git a/tests/test_visibility.py b/tests/test_visibility.py index 8d8ce0cab9..2eea3b098b 100644 --- a/tests/test_visibility.py +++ b/tests/test_visibility.py @@ -96,7 +96,7 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase): events_to_filter.append(evt) # the erasey user gets erased - self.hs.get_datastore().mark_user_erased("@erased:local_hs") + yield self.hs.get_datastore().mark_user_erased("@erased:local_hs") # ... and the filtering happens. 
filtered = yield filter_events_for_server( diff --git a/tests/unittest.py b/tests/unittest.py index 8b513bb32b..a3d39920db 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -22,6 +22,7 @@ from canonicaljson import json import twisted import twisted.logger +from twisted.internet.defer import Deferred from twisted.trial import unittest from synapse.http.server import JsonResource @@ -281,12 +282,14 @@ class HomeserverTestCase(TestCase): kwargs.update(self._hs_args) return setup_test_homeserver(self.addCleanup, *args, **kwargs) - def pump(self): + def pump(self, by=0.0): """ Pump the reactor enough that Deferreds will fire. """ - self.reactor.pump([0.0] * 100) + self.reactor.pump([by] * 100) def get_success(self, d): + if not isinstance(d, Deferred): + return d self.pump() return self.successResultOf(d) diff --git a/tests/utils.py b/tests/utils.py index 63e30dc6c0..114470d641 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -30,8 +30,8 @@ from synapse.config.server import ServerConfig from synapse.federation.transport import server from synapse.http.server import HttpServer from synapse.server import HomeServer -from synapse.storage import DataStore, PostgresEngine -from synapse.storage.engines import create_engine +from synapse.storage import DataStore +from synapse.storage.engines import PostgresEngine, create_engine from synapse.storage.prepare_database import ( _get_or_create_schema_state, _setup_new_database, @@ -42,6 +42,7 @@ from synapse.util.ratelimitutils import FederationRateLimiter # set this to True to run the tests against postgres instead of sqlite. USE_POSTGRES_FOR_TESTS = os.environ.get("SYNAPSE_POSTGRES", False) +LEAVE_DB = os.environ.get("SYNAPSE_LEAVE_DB", False) POSTGRES_USER = os.environ.get("SYNAPSE_POSTGRES_USER", "postgres") POSTGRES_BASE_DB = "_synapse_unit_tests_base_%s" % (os.getpid(),) @@ -244,8 +245,9 @@ def setup_test_homeserver( cur.close() db_conn.close() - # Register the cleanup hook - cleanup_func(cleanup) + if not LEAVE_DB: + # Register the cleanup hook + cleanup_func(cleanup) hs.setup() else: From b13836da7f5951bfe3973819b5affa12805185cf Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 3 Sep 2018 17:22:49 +0100 Subject: [PATCH 024/138] Remove conn_id from repl prometheus metrics `conn_id` gets set to a random string, and so we end up filling up prometheus with tonnes of data series, which is bad. 
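
For illustration, every distinct label tuple is a separate time series, so
the old gauges emitted lines like (values made up)::

    synapse_replication_tcp_protocol_inbound_commands{command="RDATA",name="client",conn_id="aBcDeF"} 12
    synapse_replication_tcp_protocol_inbound_commands{command="RDATA",name="client",conn_id="GhIjKl"} 3

with a fresh ``conn_id`` minted on every (re)connection, the series count
grows without bound; dropping the label bounds it by connection name.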
--- synapse/replication/tcp/protocol.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index 74e892c104..0d28152703 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -607,9 +607,9 @@ def transport_buffer_size(protocol): transport_send_buffer = LaterGauge( "synapse_replication_tcp_protocol_transport_send_buffer", "", - ["name", "conn_id"], + ["name"], lambda: { - (p.name, p.conn_id): transport_buffer_size(p) for p in connected_connections + (p.name,): transport_buffer_size(p) for p in connected_connections }, ) @@ -632,9 +632,9 @@ def transport_kernel_read_buffer_size(protocol, read=True): tcp_transport_kernel_send_buffer = LaterGauge( "synapse_replication_tcp_protocol_transport_kernel_send_buffer", "", - ["name", "conn_id"], + ["name"], lambda: { - (p.name, p.conn_id): transport_kernel_read_buffer_size(p, False) + (p.name,): transport_kernel_read_buffer_size(p, False) for p in connected_connections }, ) @@ -643,9 +643,9 @@ tcp_transport_kernel_send_buffer = LaterGauge( tcp_transport_kernel_read_buffer = LaterGauge( "synapse_replication_tcp_protocol_transport_kernel_read_buffer", "", - ["name", "conn_id"], + ["name"], lambda: { - (p.name, p.conn_id): transport_kernel_read_buffer_size(p, True) + (p.name,): transport_kernel_read_buffer_size(p, True) for p in connected_connections }, ) @@ -654,9 +654,9 @@ tcp_transport_kernel_read_buffer = LaterGauge( tcp_inbound_commands = LaterGauge( "synapse_replication_tcp_protocol_inbound_commands", "", - ["command", "name", "conn_id"], + ["command", "name"], lambda: { - (k[0], p.name, p.conn_id): count + (k[0], p.name,): count for p in connected_connections for k, count in iteritems(p.inbound_commands_counter) }, @@ -665,9 +665,9 @@ tcp_inbound_commands = LaterGauge( tcp_outbound_commands = LaterGauge( "synapse_replication_tcp_protocol_outbound_commands", "", - ["command", "name", "conn_id"], + ["command", "name"], lambda: { - (k[0], p.name, p.conn_id): count + (k[0], p.name,): count for p in connected_connections for k, count in iteritems(p.outbound_commands_counter) }, From 87b111f96a244cd212e8fff7f1a0f661fdd8073e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 3 Sep 2018 17:26:15 +0100 Subject: [PATCH 025/138] Newsfile --- changelog.d/3788.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3788.bugfix diff --git a/changelog.d/3788.bugfix b/changelog.d/3788.bugfix new file mode 100644 index 0000000000..72316fb881 --- /dev/null +++ b/changelog.d/3788.bugfix @@ -0,0 +1 @@ +Remove connection ID for replication prometheus metrics, as it creates a large number of new series. 
From 3e242dc14976bf455717c5f631a997a2c0e5f2c4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 4 Sep 2018 11:45:52 +0100 Subject: [PATCH 026/138] Remove conn_id --- synapse/replication/tcp/protocol.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index 0d28152703..5dc7b3fffc 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -590,9 +590,9 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol): pending_commands = LaterGauge( "synapse_replication_tcp_protocol_pending_commands", "", - ["name", "conn_id"], + ["name"], lambda: { - (p.name, p.conn_id): len(p.pending_commands) for p in connected_connections + (p.name,): len(p.pending_commands) for p in connected_connections }, ) From c42f7fd7b9f99923d71ad064fff7d75a5fc3cd93 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Tue, 4 Sep 2018 12:03:17 +0100 Subject: [PATCH 027/138] improve human readable error messages --- synapse/rest/client/v2_alpha/account.py | 16 ++++++++++++---- synapse/rest/client/v2_alpha/register.py | 11 ++++++++--- 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index 372648cafd..37b32dd37b 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -53,7 +53,9 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): if not check_3pid_allowed(self.hs, "email", body['email']): raise SynapseError( - 403, "Third party identifier is not allowed", Codes.THREEPID_DENIED, + 403, + "Your email domain is not authorized on this server", + Codes.THREEPID_DENIED, ) existingUid = yield self.hs.get_datastore().get_user_id_by_threepid( @@ -89,7 +91,9 @@ class MsisdnPasswordRequestTokenRestServlet(RestServlet): if not check_3pid_allowed(self.hs, "msisdn", msisdn): raise SynapseError( - 403, "Third party identifier is not allowed", Codes.THREEPID_DENIED, + 403, + "Account phone numbers are not authorized on this server", + Codes.THREEPID_DENIED, ) existingUid = yield self.datastore.get_user_id_by_threepid( @@ -241,7 +245,9 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): if not check_3pid_allowed(self.hs, "email", body['email']): raise SynapseError( - 403, "Third party identifier is not allowed", Codes.THREEPID_DENIED, + 403, + "Your email domain is not authorized on this server", + Codes.THREEPID_DENIED, ) existingUid = yield self.datastore.get_user_id_by_threepid( @@ -276,7 +282,9 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet): if not check_3pid_allowed(self.hs, "msisdn", msisdn): raise SynapseError( - 403, "Third party identifier is not allowed", Codes.THREEPID_DENIED, + 403, + "Account phone numbers are not authorized on this server", + Codes.THREEPID_DENIED, ) existingUid = yield self.datastore.get_user_id_by_threepid( diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 2fb4d43ccb..200b29fa38 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -75,7 +75,9 @@ class EmailRegisterRequestTokenRestServlet(RestServlet): if not check_3pid_allowed(self.hs, "email", body['email']): raise SynapseError( - 403, "Third party identifier is not allowed", Codes.THREEPID_DENIED, + 403, + "Your email domain is not authorized to register on this server", + Codes.THREEPID_DENIED, ) existingUid = yield 
self.hs.get_datastore().get_user_id_by_threepid( @@ -115,7 +117,9 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet): if not check_3pid_allowed(self.hs, "msisdn", msisdn): raise SynapseError( - 403, "Third party identifier is not allowed", Codes.THREEPID_DENIED, + 403, + "Phone numbers are not authorized to register on this server", + Codes.THREEPID_DENIED, ) existingUid = yield self.hs.get_datastore().get_user_id_by_threepid( @@ -373,7 +377,8 @@ class RegisterRestServlet(RestServlet): if not check_3pid_allowed(self.hs, medium, address): raise SynapseError( - 403, "Third party identifier is not allowed", + 403, + "Third party identifier (email/phone number) is not allowed", Codes.THREEPID_DENIED, ) From bae37cd811fd71c68d28eb300eb0b74a5b6a1c27 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Tue, 4 Sep 2018 12:07:00 +0100 Subject: [PATCH 028/138] improve human readable error message --- synapse/rest/client/v2_alpha/register.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 200b29fa38..192f52e462 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -378,7 +378,8 @@ class RegisterRestServlet(RestServlet): if not check_3pid_allowed(self.hs, medium, address): raise SynapseError( 403, - "Third party identifier (email/phone number) is not allowed", + "Third party identifiers (email/phone numbers)" + + " are not authorized on this server", Codes.THREEPID_DENIED, ) From 70fc599ede85bdeb66ac94bea6f85f3f922f7ca5 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Tue, 4 Sep 2018 12:08:27 +0100 Subject: [PATCH 029/138] towncrier --- changelog.d/3789.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3789.misc diff --git a/changelog.d/3789.misc b/changelog.d/3789.misc new file mode 100644 index 0000000000..d2d5d91091 --- /dev/null +++ b/changelog.d/3789.misc @@ -0,0 +1 @@ +Improve human readable error messages for threepid registration/account update From 7e9ced417846307d88075ed34d7764717a32def0 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 4 Sep 2018 21:12:04 +1000 Subject: [PATCH 030/138] version and towncrier --- CHANGES.md | 44 ++++++++++++++++++++++++++++++++++++++++ changelog.d/3378.misc | 1 - changelog.d/3659.feature | 1 - changelog.d/3673.misc | 1 - changelog.d/3680.feature | 1 - changelog.d/3722.bugfix | 1 - changelog.d/3724.feature | 1 - changelog.d/3725.misc | 1 - changelog.d/3726.misc | 1 - changelog.d/3727.misc | 1 - changelog.d/3730.misc | 1 - changelog.d/3734.misc | 1 - changelog.d/3735.misc | 1 - changelog.d/3737.misc | 1 - changelog.d/3740.misc | 1 - changelog.d/3746.misc | 1 - changelog.d/3747.bugfix | 1 - changelog.d/3749.feature | 1 - changelog.d/3751.feature | 1 - changelog.d/3753.bugfix | 1 - changelog.d/3754.bugfix | 1 - changelog.d/3755.bugfix | 1 - changelog.d/3756.bugfix | 1 - changelog.d/3758.bugfix | 1 - changelog.d/3760.bugfix | 1 - changelog.d/3764.misc | 1 - changelog.d/3768.bugfix | 1 - changelog.d/3777.bugfix | 1 - synapse/__init__.py | 2 +- 29 files changed, 45 insertions(+), 28 deletions(-) delete mode 100644 changelog.d/3378.misc delete mode 100644 changelog.d/3659.feature delete mode 100644 changelog.d/3673.misc delete mode 100644 changelog.d/3680.feature delete mode 100644 changelog.d/3722.bugfix delete mode 100644 changelog.d/3724.feature delete mode 100644 changelog.d/3725.misc delete mode 100644 changelog.d/3726.misc delete mode 100644 changelog.d/3727.misc delete 
mode 100644 changelog.d/3730.misc delete mode 100644 changelog.d/3734.misc delete mode 100644 changelog.d/3735.misc delete mode 100644 changelog.d/3737.misc delete mode 100644 changelog.d/3740.misc delete mode 100644 changelog.d/3746.misc delete mode 100644 changelog.d/3747.bugfix delete mode 100644 changelog.d/3749.feature delete mode 100644 changelog.d/3751.feature delete mode 100644 changelog.d/3753.bugfix delete mode 100644 changelog.d/3754.bugfix delete mode 100644 changelog.d/3755.bugfix delete mode 100644 changelog.d/3756.bugfix delete mode 100644 changelog.d/3758.bugfix delete mode 100644 changelog.d/3760.bugfix delete mode 100644 changelog.d/3764.misc delete mode 100644 changelog.d/3768.bugfix delete mode 100644 changelog.d/3777.bugfix diff --git a/CHANGES.md b/CHANGES.md index a35f5aebc7..c2b1e70488 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,47 @@ +Synapse 0.33.4rc1 (2018-09-04) +============================== + +Features +-------- + +- Support profile API endpoints on workers ([\#3659](https://github.com/matrix-org/synapse/issues/3659)) +- Server notices for resource limit blocking ([\#3680](https://github.com/matrix-org/synapse/issues/3680)) +- Allow guests to use /rooms/:roomId/event/:eventId ([\#3724](https://github.com/matrix-org/synapse/issues/3724)) +- Add mau_trial_days config param, so that users only get counted as MAU after N days. ([\#3749](https://github.com/matrix-org/synapse/issues/3749)) +- Require twisted 17.1 or later (fixes [#3741](https://github.com/matrix-org/synapse/issues/3741)). ([\#3751](https://github.com/matrix-org/synapse/issues/3751)) + + +Bugfixes +-------- + +- Fix error collecting prometheus metrics when run on dedicated thread due to threading concurrency issues ([\#3722](https://github.com/matrix-org/synapse/issues/3722)) +- Fix bug where we resent "limit exceeded" server notices repeatedly ([\#3747](https://github.com/matrix-org/synapse/issues/3747)) +- Fix bug where we broke sync when using limit_usage_by_mau but hadn't configured server notices ([\#3753](https://github.com/matrix-org/synapse/issues/3753)) +- Fix 'federation_domain_whitelist' such that an empty list correctly blocks all outbound federation traffic ([\#3754](https://github.com/matrix-org/synapse/issues/3754)) +- Fix tagging of server notice rooms ([\#3755](https://github.com/matrix-org/synapse/issues/3755), [\#3756](https://github.com/matrix-org/synapse/issues/3756)) +- Fix 'admin_uri' config variable and error parameter to be 'admin_contact' to match the spec. ([\#3758](https://github.com/matrix-org/synapse/issues/3758)) +- Don't return non-LL-member state in incremental sync state blocks ([\#3760](https://github.com/matrix-org/synapse/issues/3760)) +- Fix bug in sending presence over federation ([\#3768](https://github.com/matrix-org/synapse/issues/3768)) +- Fix bug where preserved threepid user comes to sign up and server is mau blocked ([\#3777](https://github.com/matrix-org/synapse/issues/3777)) + + +Internal Changes +---------------- + +- Removed the link to the unmaintained matrix-synapse-auto-deploy project from the readme. ([\#3378](https://github.com/matrix-org/synapse/issues/3378)) +- Refactor state module to support multiple room versions ([\#3673](https://github.com/matrix-org/synapse/issues/3673)) +- The synapse.storage module has been ported to Python 3. 
([\#3725](https://github.com/matrix-org/synapse/issues/3725)) +- Split the state_group_cache into member and non-member state events (and so speed up LL /sync) ([\#3726](https://github.com/matrix-org/synapse/issues/3726)) +- Log failure to authenticate remote servers as warnings (without stack traces) ([\#3727](https://github.com/matrix-org/synapse/issues/3727)) +- The CONTRIBUTING guidelines have been updated to mention our use of Markdown and that .misc files have content. ([\#3730](https://github.com/matrix-org/synapse/issues/3730)) +- Reference the need for an HTTP replication port when using the federation_reader worker ([\#3734](https://github.com/matrix-org/synapse/issues/3734)) +- Fix minor spelling error in federation client documentation. ([\#3735](https://github.com/matrix-org/synapse/issues/3735)) +- Remove redundant state resolution function ([\#3737](https://github.com/matrix-org/synapse/issues/3737)) +- The test suite now passes on PostgreSQL. ([\#3740](https://github.com/matrix-org/synapse/issues/3740)) +- Fix MAU cache invalidation due to missing yield ([\#3746](https://github.com/matrix-org/synapse/issues/3746)) +- Make sure that we close db connections opened during init ([\#3764](https://github.com/matrix-org/synapse/issues/3764)) + + Synapse 0.33.3 (2018-08-22) =========================== diff --git a/changelog.d/3378.misc b/changelog.d/3378.misc deleted file mode 100644 index 8f88f88e69..0000000000 --- a/changelog.d/3378.misc +++ /dev/null @@ -1 +0,0 @@ -Removed the link to the unmaintained matrix-synapse-auto-deploy project from the readme. diff --git a/changelog.d/3659.feature b/changelog.d/3659.feature deleted file mode 100644 index a5b4821c09..0000000000 --- a/changelog.d/3659.feature +++ /dev/null @@ -1 +0,0 @@ -Support profile API endpoints on workers diff --git a/changelog.d/3673.misc b/changelog.d/3673.misc deleted file mode 100644 index d672111fb9..0000000000 --- a/changelog.d/3673.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor state module to support multiple room versions diff --git a/changelog.d/3680.feature b/changelog.d/3680.feature deleted file mode 100644 index 4edaaf76a8..0000000000 --- a/changelog.d/3680.feature +++ /dev/null @@ -1 +0,0 @@ -Server notices for resource limit blocking diff --git a/changelog.d/3722.bugfix b/changelog.d/3722.bugfix deleted file mode 100644 index 16cbaf76cb..0000000000 --- a/changelog.d/3722.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix error collecting prometheus metrics when run on dedicated thread due to threading concurrency issues diff --git a/changelog.d/3724.feature b/changelog.d/3724.feature deleted file mode 100644 index 1b374ccf47..0000000000 --- a/changelog.d/3724.feature +++ /dev/null @@ -1 +0,0 @@ -Allow guests to use /rooms/:roomId/event/:eventId diff --git a/changelog.d/3725.misc b/changelog.d/3725.misc deleted file mode 100644 index 91ab9d7137..0000000000 --- a/changelog.d/3725.misc +++ /dev/null @@ -1 +0,0 @@ -The synapse.storage module has been ported to Python 3. 
diff --git a/changelog.d/3726.misc b/changelog.d/3726.misc deleted file mode 100644 index c4f66ec998..0000000000 --- a/changelog.d/3726.misc +++ /dev/null @@ -1 +0,0 @@ -Split the state_group_cache into member and non-member state events (and so speed up LL /sync) diff --git a/changelog.d/3727.misc b/changelog.d/3727.misc deleted file mode 100644 index 0b83220d90..0000000000 --- a/changelog.d/3727.misc +++ /dev/null @@ -1 +0,0 @@ -Log failure to authenticate remote servers as warnings (without stack traces) diff --git a/changelog.d/3730.misc b/changelog.d/3730.misc deleted file mode 100644 index b1ea84f732..0000000000 --- a/changelog.d/3730.misc +++ /dev/null @@ -1 +0,0 @@ -The CONTRIBUTING guidelines have been updated to mention our use of Markdown and that .misc files have content. diff --git a/changelog.d/3734.misc b/changelog.d/3734.misc deleted file mode 100644 index 4f6e4b3848..0000000000 --- a/changelog.d/3734.misc +++ /dev/null @@ -1 +0,0 @@ -Reference the need for an HTTP replication port when using the federation_reader worker diff --git a/changelog.d/3735.misc b/changelog.d/3735.misc deleted file mode 100644 index f17004be71..0000000000 --- a/changelog.d/3735.misc +++ /dev/null @@ -1 +0,0 @@ -Fix minor spelling error in federation client documentation. diff --git a/changelog.d/3737.misc b/changelog.d/3737.misc deleted file mode 100644 index 0361da4ebc..0000000000 --- a/changelog.d/3737.misc +++ /dev/null @@ -1 +0,0 @@ -Remove redundant state resolution function diff --git a/changelog.d/3740.misc b/changelog.d/3740.misc deleted file mode 100644 index 4dcb7fb5de..0000000000 --- a/changelog.d/3740.misc +++ /dev/null @@ -1 +0,0 @@ -The test suite now passes on PostgreSQL. diff --git a/changelog.d/3746.misc b/changelog.d/3746.misc deleted file mode 100644 index fc00ee773a..0000000000 --- a/changelog.d/3746.misc +++ /dev/null @@ -1 +0,0 @@ -Fix MAU cache invalidation due to missing yield diff --git a/changelog.d/3747.bugfix b/changelog.d/3747.bugfix deleted file mode 100644 index c41e2a1213..0000000000 --- a/changelog.d/3747.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where we resent "limit exceeded" server notices repeatedly diff --git a/changelog.d/3749.feature b/changelog.d/3749.feature deleted file mode 100644 index 9f8837b106..0000000000 --- a/changelog.d/3749.feature +++ /dev/null @@ -1 +0,0 @@ -Add mau_trial_days config param, so that users only get counted as MAU after N days. diff --git a/changelog.d/3751.feature b/changelog.d/3751.feature deleted file mode 100644 index dc9742b15b..0000000000 --- a/changelog.d/3751.feature +++ /dev/null @@ -1 +0,0 @@ -Require twisted 17.1 or later (fixes [#3741](https://github.com/matrix-org/synapse/issues/3741)). 
diff --git a/changelog.d/3753.bugfix b/changelog.d/3753.bugfix deleted file mode 100644 index b4301267df..0000000000 --- a/changelog.d/3753.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where we broke sync when using limit_usage_by_mau but hadn't configured server notices diff --git a/changelog.d/3754.bugfix b/changelog.d/3754.bugfix deleted file mode 100644 index 6e3ec80194..0000000000 --- a/changelog.d/3754.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix 'federation_domain_whitelist' such that an empty list correctly blocks all outbound federation traffic diff --git a/changelog.d/3755.bugfix b/changelog.d/3755.bugfix deleted file mode 100644 index 6a1f83f0ce..0000000000 --- a/changelog.d/3755.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix tagging of server notice rooms diff --git a/changelog.d/3756.bugfix b/changelog.d/3756.bugfix deleted file mode 100644 index 6a1f83f0ce..0000000000 --- a/changelog.d/3756.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix tagging of server notice rooms diff --git a/changelog.d/3758.bugfix b/changelog.d/3758.bugfix deleted file mode 100644 index 862739bfe8..0000000000 --- a/changelog.d/3758.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix 'admin_uri' config variable and error parameter to be 'admin_contact' to match the spec. diff --git a/changelog.d/3760.bugfix b/changelog.d/3760.bugfix deleted file mode 100644 index ce61fb8a2b..0000000000 --- a/changelog.d/3760.bugfix +++ /dev/null @@ -1 +0,0 @@ -Don't return non-LL-member state in incremental sync state blocks diff --git a/changelog.d/3764.misc b/changelog.d/3764.misc deleted file mode 100644 index f3614f1982..0000000000 --- a/changelog.d/3764.misc +++ /dev/null @@ -1 +0,0 @@ -Make sure that we close db connections opened during init \ No newline at end of file diff --git a/changelog.d/3768.bugfix b/changelog.d/3768.bugfix deleted file mode 100644 index a039a7fa68..0000000000 --- a/changelog.d/3768.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug in sending presence over federation diff --git a/changelog.d/3777.bugfix b/changelog.d/3777.bugfix deleted file mode 100644 index 46efc543a7..0000000000 --- a/changelog.d/3777.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where preserved threepid user comes to sign up and server is mau blocked diff --git a/synapse/__init__.py b/synapse/__init__.py index e62901b761..2c42893673 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -17,4 +17,4 @@ """ This is a reference implementation of a Matrix home server. """ -__version__ = "0.33.3" +__version__ = "0.33.4rc1" From 87c18d12ee30c84a108ac686454720d6ac6a2f84 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 4 Sep 2018 15:18:25 +0100 Subject: [PATCH 031/138] Implement 'event_format' filter param in /sync This has been specced and part-implemented; let's implement it for /sync (but no other endpoints yet :/). 
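For reference, a client opts into the raw federation-format events via the `event_format` field of a sync filter; as the diff below shows, it defaults to "client". A hedged illustration (the field names and values are taken from the diff; the surrounding request plumbing is assumed):

# Filter JSON asking /sync to return events in federation format.
sync_filter = {
    "event_format": "federation",  # the default is "client"
    "event_fields": ["type", "content", "sender"],  # optional field whitelist
}
# The filter is passed either URL-encoded in the `filter` query parameter of
# GET /_matrix/client/r0/sync, or registered first via the filter API and
# referenced by its filter_id.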
--- synapse/api/filtering.py | 1 + synapse/rest/client/v2_alpha/sync.py | 51 +++++++++++++++++++++------- 2 files changed, 39 insertions(+), 13 deletions(-) diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index 186831e118..a31a9a17e0 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -251,6 +251,7 @@ class FilterCollection(object): "include_leave", False ) self.event_fields = filter_json.get("event_fields", []) + self.event_format = filter_json.get("event_format", "client") def __repr__(self): return "<FilterCollection %s>" % (json.dumps(self._filter_json),) diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index 1275baa1ba..263d8eb73e 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -25,6 +25,7 @@ from synapse.api.errors import SynapseError from synapse.api.filtering import DEFAULT_FILTER_COLLECTION, FilterCollection from synapse.events.utils import ( format_event_for_client_v2_without_room_id, + format_event_raw, serialize_event, ) from synapse.handlers.presence import format_user_presence_state @@ -175,17 +176,28 @@ class SyncRestServlet(RestServlet): @staticmethod def encode_response(time_now, sync_result, access_token_id, filter): + if filter.event_format == 'client': + event_formatter = format_event_for_client_v2_without_room_id + elif filter.event_format == 'federation': + event_formatter = format_event_raw + else: + raise Exception("Unknown event format %s" % (filter.event_format, )) + joined = SyncRestServlet.encode_joined( - sync_result.joined, time_now, access_token_id, filter.event_fields + sync_result.joined, time_now, access_token_id, + filter.event_fields, + event_formatter, ) invited = SyncRestServlet.encode_invited( sync_result.invited, time_now, access_token_id, + event_formatter, ) archived = SyncRestServlet.encode_archived( sync_result.archived, time_now, access_token_id, filter.event_fields, + event_formatter, ) return { @@ -228,7 +240,7 @@ class SyncRestServlet(RestServlet): } @staticmethod - def encode_joined(rooms, time_now, token_id, event_fields): + def encode_joined(rooms, time_now, token_id, event_fields, event_formatter): """ Encode the joined rooms in a sync result @@ -240,7 +252,9 @@ class SyncRestServlet(RestServlet): token_id(int): ID of the user's auth token - used for namespacing of transaction IDs event_fields(list): List of event fields to include. If empty, - all fields will be returned.
+ event_formatter (func[dict]): function to convert from federation format + to client format Returns: dict[str, dict[str, object]]: the joined rooms list, in our response format @@ -248,13 +262,14 @@ class SyncRestServlet(RestServlet): joined = {} for room in rooms: joined[room.room_id] = SyncRestServlet.encode_room( - room, time_now, token_id, only_fields=event_fields + room, time_now, token_id, joined=True, only_fields=event_fields, + event_formatter=event_formatter, ) return joined @staticmethod - def encode_invited(rooms, time_now, token_id): + def encode_invited(rooms, time_now, token_id, event_formatter): """ Encode the invited rooms in a sync result @@ -264,7 +279,9 @@ class SyncRestServlet(RestServlet): time_now(int): current time - used as a baseline for age calculations token_id(int): ID of the user's auth token - used for namespacing - of transaction IDs + of transaction IDs + event_formatter (func[dict]): function to convert from federation format + to client format Returns: dict[str, dict[str, object]]: the invited rooms list, in our @@ -274,7 +291,7 @@ class SyncRestServlet(RestServlet): for room in rooms: invite = serialize_event( room.invite, time_now, token_id=token_id, - event_format=format_event_for_client_v2_without_room_id, + event_format=event_formatter, is_invite=True, ) unsigned = dict(invite.get("unsigned", {})) @@ -288,7 +305,7 @@ class SyncRestServlet(RestServlet): return invited @staticmethod - def encode_archived(rooms, time_now, token_id, event_fields): + def encode_archived(rooms, time_now, token_id, event_fields, event_formatter): """ Encode the archived rooms in a sync result @@ -300,7 +317,9 @@ class SyncRestServlet(RestServlet): token_id(int): ID of the user's auth token - used for namespacing of transaction IDs event_fields(list): List of event fields to include. If empty, - all fields will be returned. + all fields will be returned. + event_formatter (func[dict]): function to convert from federation format + to client format Returns: dict[str, dict[str, object]]: The invited rooms list, in our response format @@ -308,13 +327,18 @@ class SyncRestServlet(RestServlet): joined = {} for room in rooms: joined[room.room_id] = SyncRestServlet.encode_room( - room, time_now, token_id, joined=False, only_fields=event_fields + room, time_now, token_id, joined=False, + only_fields=event_fields, + event_formatter=event_formatter, ) return joined @staticmethod - def encode_room(room, time_now, token_id, joined=True, only_fields=None): + def encode_room( + room, time_now, token_id, joined, + only_fields, event_formatter, + ): """ Args: room (JoinedSyncResult|ArchivedSyncResult): sync result for a @@ -326,14 +350,15 @@ class SyncRestServlet(RestServlet): joined (bool): True if the user is joined to this room - will mean we handle ephemeral events only_fields(list): Optional. The list of event fields to include. + event_formatter (func[dict]): function to convert from federation format + to client format Returns: dict[str, object]: the room, encoded in our response format """ def serialize(event): - # TODO(mjark): Respect formatting requirements in the filter. 
return serialize_event( event, time_now, token_id=token_id, - event_format=format_event_for_client_v2_without_room_id, + event_format=event_formatter, only_event_fields=only_fields, ) From c91bd295f58947d1cded7dc985b7ae55b99b3a4f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 4 Sep 2018 15:23:19 +0100 Subject: [PATCH 032/138] changelog --- changelog.d/3790.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3790.feature diff --git a/changelog.d/3790.feature b/changelog.d/3790.feature new file mode 100644 index 0000000000..2c4ac62fb5 --- /dev/null +++ b/changelog.d/3790.feature @@ -0,0 +1 @@ +Implement `event_format` filter param in `/sync` From 804dd41e18c449e711e443398b95c9f6c68b6fa2 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 4 Sep 2018 01:09:12 +0100 Subject: [PATCH 033/138] Check that signatures on events are valid We should check that both the sender's server, and the server which created the event_id (which may be different from whatever the remote server has told us the origin is), have signed the event. --- synapse/federation/federation_base.py | 126 ++++++++++++++++++++++---- 1 file changed, 110 insertions(+), 16 deletions(-) diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index c11798093d..5be8e66fb8 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -13,17 +13,20 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging +from collections import namedtuple import six from twisted.internet import defer +from twisted.internet.defer import DeferredList -from synapse.api.constants import MAX_DEPTH +from synapse.api.constants import MAX_DEPTH, EventTypes, Membership from synapse.api.errors import Codes, SynapseError from synapse.crypto.event_signing import check_event_content_hash from synapse.events import FrozenEvent from synapse.events.utils import prune_event from synapse.http.servlet import assert_params_in_dict +from synapse.types import get_domain_from_id from synapse.util import logcontext, unwrapFirstError logger = logging.getLogger(__name__) @@ -133,34 +136,25 @@ class FederationBase(object): * throws a SynapseError if the signature check failed. The deferreds run their callbacks in the sentinel logcontext. 
""" - - redacted_pdus = [ - prune_event(pdu) - for pdu in pdus - ] - - deferreds = self.keyring.verify_json_objects_for_server([ - (p.origin, p.get_pdu_json()) - for p in redacted_pdus - ]) + deferreds = _check_sigs_on_pdus(self.keyring, pdus) ctx = logcontext.LoggingContext.current_context() - def callback(_, pdu, redacted): + def callback(_, pdu): with logcontext.PreserveLoggingContext(ctx): if not check_event_content_hash(pdu): logger.warn( "Event content has been tampered, redacting %s: %s", pdu.event_id, pdu.get_pdu_json() ) - return redacted + return prune_event(pdu) if self.spam_checker.check_event_for_spam(pdu): logger.warn( "Event contains spam, redacting %s: %s", pdu.event_id, pdu.get_pdu_json() ) - return redacted + return prune_event(pdu) return pdu @@ -173,16 +167,116 @@ class FederationBase(object): ) return failure - for deferred, pdu, redacted in zip(deferreds, pdus, redacted_pdus): + for deferred, pdu in zip(deferreds, pdus): deferred.addCallbacks( callback, errback, - callbackArgs=[pdu, redacted], + callbackArgs=[pdu], errbackArgs=[pdu], ) return deferreds +class PduToCheckSig(namedtuple("PduToCheckSig", [ + "pdu", "redacted_pdu_json", "event_id_domain", "sender_domain", "deferreds", +])): + pass + + +def _check_sigs_on_pdus(keyring, pdus): + """Check that the given events are correctly signed + + Args: + keyring (synapse.crypto.Keyring): keyring object to do the checks + pdus (Collection[EventBase]): the events to be checked + + Returns: + List[Deferred]: a Deferred for each event in pdus, which will either succeed if + the signatures are valid, or fail (with a SynapseError) if not. + """ + + # (currently this is written assuming the v1 room structure; we'll probably want a + # separate function for checking v2 rooms) + + # we want to check that the event is signed by: + # + # (a) the server which created the event_id + # + # (b) the sender's server. + # + # - except in the case of invites created from a 3pid invite, which are exempt + # from this check, because the sender has to match that of the original 3pid + # invite, but the event may come from a different HS, for reasons that I don't + # entirely grok (why do the senders have to match? and if they do, why doesn't the + # joining server ask the inviting server to do the switcheroo with + # exchange_third_party_invite?). + # + # That's pretty awful, since redacting such an invite will render it invalid + # (because it will then look like a regular invite without a valid signature), + # and signatures are *supposed* to be valid whether or not an event has been + # redacted. But this isn't the worst of the ways that 3pid invites are broken. + # + # let's start by getting the domain for each pdu, and flattening the event back + # to JSON. + pdus_to_check = [ + PduToCheckSig( + pdu=p, + redacted_pdu_json=prune_event(p).get_pdu_json(), + event_id_domain=get_domain_from_id(p.event_id), + sender_domain=get_domain_from_id(p.sender), + deferreds=[], + ) + for p in pdus + ] + + # first make sure that the event is signed by the event_id's domain + deferreds = keyring.verify_json_objects_for_server([ + (p.event_id_domain, p.redacted_pdu_json) + for p in pdus_to_check + ]) + + for p, d in zip(pdus_to_check, deferreds): + p.deferreds.append(d) + + # now let's look for events where the sender's domain is different to the + # event id's domain (normally only the case for joins/leaves), and add additional + # checks. 
+ pdus_to_check_sender = [ + p for p in pdus_to_check + if p.sender_domain != p.event_id_domain and not _is_invite_via_3pid(p.pdu) + ] + + more_deferreds = keyring.verify_json_objects_for_server([ + (p.sender_domain, p.redacted_pdu_json) + for p in pdus_to_check_sender + ]) + + for p, d in zip(pdus_to_check_sender, more_deferreds): + p.deferreds.append(d) + + # replace lists of deferreds with single Deferreds + return [_flatten_deferred_list(p.deferreds) for p in pdus_to_check] + + +def _flatten_deferred_list(deferreds): + """Given a list of one or more deferreds, either return the single deferred, or + combine into a DeferredList. + """ + if len(deferreds) > 1: + return DeferredList(deferreds, fireOnOneErrback=True, consumeErrors=True) + else: + assert len(deferreds) == 1 + return deferreds[0] + + +def _is_invite_via_3pid(event): + return ( + event.type == EventTypes.Member + and event.membership == Membership.INVITE + and "third_party_invite" in event.content + ) + + def event_from_pdu_json(pdu_json, outlier=False): """Construct a FrozenEvent from an event json received over federation From c127c8d0421f0228a46ebbe280c9537e8d8ea42b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 4 Sep 2018 01:23:18 +0100 Subject: [PATCH 034/138] Fix origin handling for pushed transactions Use the actual origin for push transactions, rather than whatever the remote server claimed. --- synapse/federation/federation_server.py | 20 ++++++++++---------- synapse/federation/persistence.py | 8 ++++---- synapse/federation/transport/server.py | 2 +- tests/handlers/test_typing.py | 19 ++++++++----------- tests/utils.py | 12 +++++++++--- 5 files changed, 32 insertions(+), 29 deletions(-) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 3e0cd294a1..547c6aec80 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -99,7 +99,7 @@ class FederationServer(FederationBase): @defer.inlineCallbacks @log_function - def on_incoming_transaction(self, transaction_data): + def on_incoming_transaction(self, origin, transaction_data): # keep this as early as possible to make the calculated origin ts as # accurate as possible. request_time = self._clock.time_msec() @@ -108,34 +108,33 @@ class FederationServer(FederationBase): if not transaction.transaction_id: raise Exception("Transaction missing transaction_id") - if not transaction.origin: - raise Exception("Transaction missing origin") logger.debug("[%s] Got transaction", transaction.transaction_id) # use a linearizer to ensure that we don't process the same transaction # multiple times in parallel. 
with (yield self._transaction_linearizer.queue( - (transaction.origin, transaction.transaction_id), + (origin, transaction.transaction_id), )): result = yield self._handle_incoming_transaction( - transaction, request_time, + origin, transaction, request_time, ) defer.returnValue(result) @defer.inlineCallbacks - def _handle_incoming_transaction(self, transaction, request_time): + def _handle_incoming_transaction(self, origin, transaction, request_time): """ Process an incoming transaction and return the HTTP response Args: + origin (unicode): the server making the request transaction (Transaction): incoming transaction request_time (int): timestamp that the HTTP request arrived at Returns: Deferred[(int, object)]: http response code and body """ - response = yield self.transaction_actions.have_responded(transaction) + response = yield self.transaction_actions.have_responded(origin, transaction) if response: logger.debug( @@ -149,7 +148,7 @@ class FederationServer(FederationBase): received_pdus_counter.inc(len(transaction.pdus)) - origin_host, _ = parse_server_name(transaction.origin) + origin_host, _ = parse_server_name(origin) pdus_by_room = {} @@ -190,7 +189,7 @@ class FederationServer(FederationBase): event_id = pdu.event_id try: yield self._handle_received_pdu( - transaction.origin, pdu + origin, pdu ) pdu_results[event_id] = {} except FederationError as e: @@ -212,7 +211,7 @@ class FederationServer(FederationBase): if hasattr(transaction, "edus"): for edu in (Edu(**x) for x in transaction.edus): yield self.received_edu( - transaction.origin, + origin, edu.edu_type, edu.content ) @@ -224,6 +223,7 @@ class FederationServer(FederationBase): logger.debug("Returning: %s", str(response)) yield self.transaction_actions.set_response( + origin, transaction, 200, response ) diff --git a/synapse/federation/persistence.py b/synapse/federation/persistence.py index 9146215c21..74ffd13b4f 100644 --- a/synapse/federation/persistence.py +++ b/synapse/federation/persistence.py @@ -36,7 +36,7 @@ class TransactionActions(object): self.store = datastore @log_function - def have_responded(self, transaction): + def have_responded(self, origin, transaction): """ Have we already responded to a transaction with the same id and origin? @@ -50,11 +50,11 @@ class TransactionActions(object): "transaction_id") return self.store.get_received_txn_response( - transaction.transaction_id, transaction.origin + transaction.transaction_id, origin ) @log_function - def set_response(self, transaction, code, response): + def set_response(self, origin, transaction, code, response): """ Persist how we responded to a transaction. 
Returns: @@ -66,7 +66,7 @@ class TransactionActions(object): return self.store.set_received_txn_response( transaction.transaction_id, - transaction.origin, + origin, code, response, ) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 77969a4f38..8cde9716ac 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -353,7 +353,7 @@ class FederationSendServlet(BaseFederationServlet): try: code, response = yield self.handler.on_incoming_transaction( - transaction_data + origin, transaction_data, ) except Exception: logger.exception("on_incoming_transaction failed") diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index ad58073a14..c2d951b45f 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -33,7 +33,7 @@ from ..utils import ( ) -def _expect_edu(destination, edu_type, content, origin="test"): +def _expect_edu_transaction(edu_type, content, origin="test"): return { "origin": origin, "origin_server_ts": 1000000, @@ -42,8 +42,8 @@ def _expect_edu(destination, edu_type, content, origin="test"): } -def _make_edu_json(origin, edu_type, content): - return json.dumps(_expect_edu("test", edu_type, content, origin=origin)).encode( +def _make_edu_transaction_json(edu_type, content): + return json.dumps(_expect_edu_transaction(edu_type, content)).encode( 'utf8' ) @@ -190,8 +190,7 @@ class TypingNotificationsTestCase(unittest.TestCase): call( "farm", path="/_matrix/federation/v1/send/1000000/", - data=_expect_edu( - "farm", + data=_expect_edu_transaction( "m.typing", content={ "room_id": self.room_id, @@ -221,11 +220,10 @@ class TypingNotificationsTestCase(unittest.TestCase): self.assertEquals(self.event_source.get_current_key(), 0) - yield self.mock_federation_resource.trigger( + (code, response) = yield self.mock_federation_resource.trigger( "PUT", "/_matrix/federation/v1/send/1000000/", - _make_edu_json( - "farm", + _make_edu_transaction_json( "m.typing", content={ "room_id": self.room_id, @@ -233,7 +231,7 @@ class TypingNotificationsTestCase(unittest.TestCase): "typing": True, }, ), - federation_auth=True, + federation_auth_origin=b'farm', ) self.on_new_event.assert_has_calls( @@ -264,8 +262,7 @@ class TypingNotificationsTestCase(unittest.TestCase): call( "farm", path="/_matrix/federation/v1/send/1000000/", - data=_expect_edu( - "farm", + data=_expect_edu_transaction( "m.typing", content={ "room_id": self.room_id, diff --git a/tests/utils.py b/tests/utils.py index bb0fc74054..8de2898b2f 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -306,7 +306,10 @@ class MockHttpResource(HttpServer): @patch('twisted.web.http.Request') @defer.inlineCallbacks - def trigger(self, http_method, path, content, mock_request, federation_auth=False): + def trigger( + self, http_method, path, content, mock_request, + federation_auth_origin=None, + ): """ Fire an HTTP event. Args: @@ -315,6 +318,7 @@ class MockHttpResource(HttpServer): content : The HTTP body mock_request : Mocked request to pass to the event so it can get content. 
+ federation_auth_origin (bytes|None): domain to authenticate as, for federation Returns: A tuple of (code, response) Raises: @@ -335,8 +339,10 @@ class MockHttpResource(HttpServer): mock_request.getClientIP.return_value = "-" headers = {} - if federation_auth: - headers[b"Authorization"] = [b"X-Matrix origin=test,key=,sig="] + if federation_auth_origin is not None: + headers[b"Authorization"] = [ + b"X-Matrix origin=%s,key=,sig=" % (federation_auth_origin, ) + ] mock_request.requestHeaders.getRawHeaders = mock_getRawHeaders(headers) # return the right path if the event requires it From 2d2828dcbc2c6360d28a64d3849cf849eb5348c4 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 6 Sep 2018 00:10:47 +1000 Subject: [PATCH 035/138] Port http/ to Python 3 (#3771) --- changelog.d/3771.misc | 1 + setup.cfg | 5 +- synapse/appservice/api.py | 13 +- synapse/federation/federation_server.py | 10 +- synapse/http/client.py | 82 ++++------ synapse/http/matrixfederationclient.py | 204 ++++++++++-------------- synapse/http/site.py | 4 +- synapse/python_dependencies.py | 1 + 8 files changed, 134 insertions(+), 186 deletions(-) create mode 100644 changelog.d/3771.misc diff --git a/changelog.d/3771.misc b/changelog.d/3771.misc new file mode 100644 index 0000000000..47aa34bc04 --- /dev/null +++ b/changelog.d/3771.misc @@ -0,0 +1 @@ +http/ is now ported to Python 3. diff --git a/setup.cfg b/setup.cfg index c2620be6c5..52feaa9cc7 100644 --- a/setup.cfg +++ b/setup.cfg @@ -17,13 +17,14 @@ ignore = [pep8] max-line-length = 90 # W503 requires that binary operators be at the end, not start, of lines. Erik -# doesn't like it. E203 is contrary to PEP8. -ignore = W503,E203 +# doesn't like it. E203 is contrary to PEP8. E731 is silly. +ignore = W503,E203,E731 [flake8] # note that flake8 inherits the "ignore" settings from "pep8" (because it uses # pep8 to do those checks), but not the "max-line-length" setting max-line-length = 90 +ignore=W503,E203,E731 [isort] line_length = 89 diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index 6980e5890e..9ccc5a80fc 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -13,7 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging -import urllib + +from six.moves import urllib from prometheus_client import Counter @@ -98,7 +99,7 @@ class ApplicationServiceApi(SimpleHttpClient): def query_user(self, service, user_id): if service.url is None: defer.returnValue(False) - uri = service.url + ("/users/%s" % urllib.quote(user_id)) + uri = service.url + ("/users/%s" % urllib.parse.quote(user_id)) response = None try: response = yield self.get_json(uri, { @@ -119,7 +120,7 @@ class ApplicationServiceApi(SimpleHttpClient): def query_alias(self, service, alias): if service.url is None: defer.returnValue(False) - uri = service.url + ("/rooms/%s" % urllib.quote(alias)) + uri = service.url + ("/rooms/%s" % urllib.parse.quote(alias)) response = None try: response = yield self.get_json(uri, { @@ -153,7 +154,7 @@ class ApplicationServiceApi(SimpleHttpClient): service.url, APP_SERVICE_PREFIX, kind, - urllib.quote(protocol) + urllib.parse.quote(protocol) ) try: response = yield self.get_json(uri, fields) @@ -188,7 +189,7 @@ class ApplicationServiceApi(SimpleHttpClient): uri = "%s%s/thirdparty/protocol/%s" % ( service.url, APP_SERVICE_PREFIX, - urllib.quote(protocol) + urllib.parse.quote(protocol) ) try: info = yield self.get_json(uri, {}) @@ -228,7 +229,7 @@ class ApplicationServiceApi(SimpleHttpClient): txn_id = str(txn_id) uri = service.url + ("/transactions/%s" % - urllib.quote(txn_id)) + urllib.parse.quote(txn_id)) try: yield self.put_json( uri=uri, diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 3e0cd294a1..6e52c4b6b5 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -838,9 +838,9 @@ class ReplicationFederationHandlerRegistry(FederationHandlerRegistry): ) return self._send_edu( - edu_type=edu_type, - origin=origin, - content=content, + edu_type=edu_type, + origin=origin, + content=content, ) def on_query(self, query_type, args): @@ -851,6 +851,6 @@ class ReplicationFederationHandlerRegistry(FederationHandlerRegistry): return handler(args) return self._get_query_client( - query_type=query_type, - args=args, + query_type=query_type, + args=args, ) diff --git a/synapse/http/client.py b/synapse/http/client.py index ab4fbf59b2..4ba54fed05 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -13,24 +13,25 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+ import logging -import urllib -from six import StringIO +from six import text_type +from six.moves import urllib +import treq from canonicaljson import encode_canonical_json, json from prometheus_client import Counter from OpenSSL import SSL from OpenSSL.SSL import VERIFY_NONE -from twisted.internet import defer, protocol, reactor, ssl, task +from twisted.internet import defer, protocol, reactor, ssl from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS from twisted.web._newclient import ResponseDone from twisted.web.client import ( Agent, BrowserLikeRedirectAgent, ContentDecoderAgent, - FileBodyProducer as TwistedFileBodyProducer, GzipDecoder, HTTPConnectionPool, PartialDownloadError, @@ -83,18 +84,20 @@ class SimpleHttpClient(object): if hs.config.user_agent_suffix: self.user_agent = "%s %s" % (self.user_agent, hs.config.user_agent_suffix,) + self.user_agent = self.user_agent.encode('ascii') + @defer.inlineCallbacks - def request(self, method, uri, *args, **kwargs): + def request(self, method, uri, data=b'', headers=None): # A small wrapper around self.agent.request() so we can easily attach # counters to it outgoing_requests_counter.labels(method).inc() # log request but strip `access_token` (AS requests for example include this) - logger.info("Sending request %s %s", method, redact_uri(uri)) + logger.info("Sending request %s %s", method, redact_uri(uri.encode('ascii'))) try: - request_deferred = self.agent.request( - method, uri, *args, **kwargs + request_deferred = treq.request( + method, uri, agent=self.agent, data=data, headers=headers ) add_timeout_to_deferred( request_deferred, 60, self.hs.get_reactor(), @@ -105,14 +108,14 @@ class SimpleHttpClient(object): incoming_responses_counter.labels(method, response.code).inc() logger.info( "Received response to %s %s: %s", - method, redact_uri(uri), response.code + method, redact_uri(uri.encode('ascii')), response.code ) defer.returnValue(response) except Exception as e: incoming_responses_counter.labels(method, "ERR").inc() logger.info( "Error sending request to %s %s: %s %s", - method, redact_uri(uri), type(e).__name__, e.message + method, redact_uri(uri.encode('ascii')), type(e).__name__, e.args[0] ) raise @@ -137,7 +140,8 @@ class SimpleHttpClient(object): # TODO: Do we ever want to log message contents? 
logger.debug("post_urlencoded_get_json args: %s", args) - query_bytes = urllib.urlencode(encode_urlencode_args(args), True) + query_bytes = urllib.parse.urlencode( + encode_urlencode_args(args), True).encode("utf8") actual_headers = { b"Content-Type": [b"application/x-www-form-urlencoded"], @@ -148,15 +152,14 @@ class SimpleHttpClient(object): response = yield self.request( "POST", - uri.encode("ascii"), + uri, headers=Headers(actual_headers), - bodyProducer=FileBodyProducer(StringIO(query_bytes)) + data=query_bytes ) - body = yield make_deferred_yieldable(readBody(response)) - if 200 <= response.code < 300: - defer.returnValue(json.loads(body)) + body = yield make_deferred_yieldable(treq.json_content(response)) + defer.returnValue(body) else: raise HttpResponseException(response.code, response.phrase, body) @@ -191,9 +194,9 @@ class SimpleHttpClient(object): response = yield self.request( "POST", - uri.encode("ascii"), + uri, headers=Headers(actual_headers), - bodyProducer=FileBodyProducer(StringIO(json_str)) + data=json_str ) body = yield make_deferred_yieldable(readBody(response)) @@ -248,7 +251,7 @@ class SimpleHttpClient(object): ValueError: if the response was not JSON """ if len(args): - query_bytes = urllib.urlencode(args, True) + query_bytes = urllib.parse.urlencode(args, True) uri = "%s?%s" % (uri, query_bytes) json_str = encode_canonical_json(json_body) @@ -262,9 +265,9 @@ class SimpleHttpClient(object): response = yield self.request( "PUT", - uri.encode("ascii"), + uri, headers=Headers(actual_headers), - bodyProducer=FileBodyProducer(StringIO(json_str)) + data=json_str ) body = yield make_deferred_yieldable(readBody(response)) @@ -293,7 +296,7 @@ class SimpleHttpClient(object): HttpResponseException on a non-2xx HTTP response. """ if len(args): - query_bytes = urllib.urlencode(args, True) + query_bytes = urllib.parse.urlencode(args, True) uri = "%s?%s" % (uri, query_bytes) actual_headers = { @@ -304,7 +307,7 @@ class SimpleHttpClient(object): response = yield self.request( "GET", - uri.encode("ascii"), + uri, headers=Headers(actual_headers), ) @@ -339,7 +342,7 @@ class SimpleHttpClient(object): response = yield self.request( "GET", - url.encode("ascii"), + url, headers=Headers(actual_headers), ) @@ -434,12 +437,12 @@ class CaptchaServerHttpClient(SimpleHttpClient): @defer.inlineCallbacks def post_urlencoded_get_raw(self, url, args={}): - query_bytes = urllib.urlencode(encode_urlencode_args(args), True) + query_bytes = urllib.parse.urlencode(encode_urlencode_args(args), True) response = yield self.request( "POST", - url.encode("ascii"), - bodyProducer=FileBodyProducer(StringIO(query_bytes)), + url, + data=query_bytes, headers=Headers({ b"Content-Type": [b"application/x-www-form-urlencoded"], b"User-Agent": [self.user_agent], @@ -510,7 +513,7 @@ def encode_urlencode_args(args): def encode_urlencode_arg(arg): - if isinstance(arg, unicode): + if isinstance(arg, text_type): return arg.encode('utf-8') elif isinstance(arg, list): return [encode_urlencode_arg(i) for i in arg] @@ -542,26 +545,3 @@ class InsecureInterceptableContextFactory(ssl.ContextFactory): def creatorForNetloc(self, hostname, port): return self - - -class FileBodyProducer(TwistedFileBodyProducer): - """Workaround for https://twistedmatrix.com/trac/ticket/8473 - - We override the pauseProducing and resumeProducing methods in twisted's - FileBodyProducer so that they do not raise exceptions if the task has - already completed. 
- """ - - def pauseProducing(self): - try: - super(FileBodyProducer, self).pauseProducing() - except task.TaskDone: - # task has already completed - pass - - def resumeProducing(self): - try: - super(FileBodyProducer, self).resumeProducing() - except task.NotPaused: - # task was not paused (probably because it had already completed) - pass diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index b34bb8e31a..6a1fc8ca55 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -17,19 +17,19 @@ import cgi import logging import random import sys -import urllib -from six import string_types -from six.moves.urllib import parse as urlparse +from six import PY3, string_types +from six.moves import urllib -from canonicaljson import encode_canonical_json, json +import treq +from canonicaljson import encode_canonical_json from prometheus_client import Counter from signedjson.sign import sign_json from twisted.internet import defer, protocol, reactor from twisted.internet.error import DNSLookupError from twisted.web._newclient import ResponseDone -from twisted.web.client import Agent, HTTPConnectionPool, readBody +from twisted.web.client import Agent, HTTPConnectionPool from twisted.web.http_headers import Headers import synapse.metrics @@ -58,13 +58,18 @@ incoming_responses_counter = Counter("synapse_http_matrixfederationclient_respon MAX_LONG_RETRIES = 10 MAX_SHORT_RETRIES = 3 +if PY3: + MAXINT = sys.maxsize +else: + MAXINT = sys.maxint + class MatrixFederationEndpointFactory(object): def __init__(self, hs): self.tls_client_options_factory = hs.tls_client_options_factory def endpointForURI(self, uri): - destination = uri.netloc + destination = uri.netloc.decode('ascii') return matrix_federation_endpoint( reactor, destination, timeout=10, @@ -93,26 +98,32 @@ class MatrixFederationHttpClient(object): ) self.clock = hs.get_clock() self._store = hs.get_datastore() - self.version_string = hs.version_string + self.version_string = hs.version_string.encode('ascii') self._next_id = 1 def _create_url(self, destination, path_bytes, param_bytes, query_bytes): - return urlparse.urlunparse( - ("matrix", destination, path_bytes, param_bytes, query_bytes, "") + return urllib.parse.urlunparse( + (b"matrix", destination, path_bytes, param_bytes, query_bytes, b"") ) @defer.inlineCallbacks def _request(self, destination, method, path, - body_callback, headers_dict={}, param_bytes=b"", - query_bytes=b"", retry_on_dns_fail=True, + json=None, json_callback=None, + param_bytes=b"", + query=None, retry_on_dns_fail=True, timeout=None, long_retries=False, ignore_backoff=False, backoff_on_404=False): - """ Creates and sends a request to the given server + """ + Creates and sends a request to the given server. + Args: destination (str): The remote server to send the HTTP request to. method (str): HTTP method path (str): The HTTP path + json (dict or None): JSON to send in the body. + json_callback (func or None): A callback to generate the JSON. + query (dict or None): Query arguments. ignore_backoff (bool): true to ignore the historical backoff data and try the request anyway. 
backoff_on_404 (bool): Back off if we get a 404 @@ -146,22 +157,29 @@ class MatrixFederationHttpClient(object): ignore_backoff=ignore_backoff, ) - destination = destination.encode("ascii") + headers_dict = {} path_bytes = path.encode("ascii") - with limiter: - headers_dict[b"User-Agent"] = [self.version_string] - headers_dict[b"Host"] = [destination] + if query: + query_bytes = encode_query_args(query) + else: + query_bytes = b"" - url_bytes = self._create_url( - destination, path_bytes, param_bytes, query_bytes - ) + headers_dict = { + "User-Agent": [self.version_string], + "Host": [destination], + } + + with limiter: + url = self._create_url( + destination.encode("ascii"), path_bytes, param_bytes, query_bytes + ).decode('ascii') txn_id = "%s-O-%s" % (method, self._next_id) - self._next_id = (self._next_id + 1) % (sys.maxint - 1) + self._next_id = (self._next_id + 1) % (MAXINT - 1) outbound_logger.info( "{%s} [%s] Sending request: %s %s", - txn_id, destination, method, url_bytes + txn_id, destination, method, url ) # XXX: Would be much nicer to retry only at the transaction-layer @@ -171,23 +189,33 @@ class MatrixFederationHttpClient(object): else: retries_left = MAX_SHORT_RETRIES - http_url_bytes = urlparse.urlunparse( - ("", "", path_bytes, param_bytes, query_bytes, "") - ) + http_url = urllib.parse.urlunparse( + (b"", b"", path_bytes, param_bytes, query_bytes, b"") + ).decode('ascii') log_result = None try: while True: - producer = None - if body_callback: - producer = body_callback(method, http_url_bytes, headers_dict) - try: - request_deferred = self.agent.request( + if json_callback: + json = json_callback() + + if json: + data = encode_canonical_json(json) + headers_dict["Content-Type"] = ["application/json"] + self.sign_request( + destination, method, http_url, headers_dict, json + ) + else: + data = None + self.sign_request(destination, method, http_url, headers_dict) + + request_deferred = treq.request( method, - url_bytes, - Headers(headers_dict), - producer + url, + headers=Headers(headers_dict), + data=data, + agent=self.agent, ) add_timeout_to_deferred( request_deferred, @@ -218,7 +246,7 @@ class MatrixFederationHttpClient(object): txn_id, destination, method, - url_bytes, + url, _flatten_response_never_received(e), ) @@ -252,7 +280,7 @@ class MatrixFederationHttpClient(object): # :'( # Update transactions table? 
with logcontext.PreserveLoggingContext(): - body = yield readBody(response) + body = yield treq.content(response) raise HttpResponseException( response.code, response.phrase, body ) @@ -297,11 +325,11 @@ class MatrixFederationHttpClient(object): auth_headers = [] for key, sig in request["signatures"][self.server_name].items(): - auth_headers.append(bytes( + auth_headers.append(( "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % ( self.server_name, key, sig, - ) - )) + )).encode('ascii') + ) headers_dict[b"Authorization"] = auth_headers @@ -347,24 +375,14 @@ class MatrixFederationHttpClient(object): """ if not json_data_callback: - def json_data_callback(): - return data - - def body_callback(method, url_bytes, headers_dict): - json_data = json_data_callback() - self.sign_request( - destination, method, url_bytes, headers_dict, json_data - ) - producer = _JsonProducer(json_data) - return producer + json_data_callback = lambda: data response = yield self._request( destination, "PUT", path, - body_callback=body_callback, - headers_dict={"Content-Type": ["application/json"]}, - query_bytes=encode_query_args(args), + json_callback=json_data_callback, + query=args, long_retries=long_retries, timeout=timeout, ignore_backoff=ignore_backoff, @@ -376,8 +394,8 @@ class MatrixFederationHttpClient(object): check_content_type_is_json(response.headers) with logcontext.PreserveLoggingContext(): - body = yield readBody(response) - defer.returnValue(json.loads(body)) + body = yield treq.json_content(response) + defer.returnValue(body) @defer.inlineCallbacks def post_json(self, destination, path, data={}, long_retries=False, @@ -410,20 +428,12 @@ class MatrixFederationHttpClient(object): Fails with ``FederationDeniedError`` if this destination is not on our federation whitelist """ - - def body_callback(method, url_bytes, headers_dict): - self.sign_request( - destination, method, url_bytes, headers_dict, data - ) - return _JsonProducer(data) - response = yield self._request( destination, "POST", path, - query_bytes=encode_query_args(args), - body_callback=body_callback, - headers_dict={"Content-Type": ["application/json"]}, + query=args, + json=data, long_retries=long_retries, timeout=timeout, ignore_backoff=ignore_backoff, @@ -434,9 +444,9 @@ class MatrixFederationHttpClient(object): check_content_type_is_json(response.headers) with logcontext.PreserveLoggingContext(): - body = yield readBody(response) + body = yield treq.json_content(response) - defer.returnValue(json.loads(body)) + defer.returnValue(body) @defer.inlineCallbacks def get_json(self, destination, path, args=None, retry_on_dns_fail=True, @@ -471,16 +481,11 @@ class MatrixFederationHttpClient(object): logger.debug("Query bytes: %s Retry DNS: %s", args, retry_on_dns_fail) - def body_callback(method, url_bytes, headers_dict): - self.sign_request(destination, method, url_bytes, headers_dict) - return None - response = yield self._request( destination, "GET", path, - query_bytes=encode_query_args(args), - body_callback=body_callback, + query=args, retry_on_dns_fail=retry_on_dns_fail, timeout=timeout, ignore_backoff=ignore_backoff, @@ -491,9 +496,9 @@ class MatrixFederationHttpClient(object): check_content_type_is_json(response.headers) with logcontext.PreserveLoggingContext(): - body = yield readBody(response) + body = yield treq.json_content(response) - defer.returnValue(json.loads(body)) + defer.returnValue(body) @defer.inlineCallbacks def delete_json(self, destination, path, long_retries=False, @@ -523,13 +528,11 @@ class 
MatrixFederationHttpClient(object): Fails with ``FederationDeniedError`` if this destination is not on our federation whitelist """ - response = yield self._request( destination, "DELETE", path, - query_bytes=encode_query_args(args), - headers_dict={"Content-Type": ["application/json"]}, + query=args, long_retries=long_retries, timeout=timeout, ignore_backoff=ignore_backoff, @@ -540,9 +543,9 @@ class MatrixFederationHttpClient(object): check_content_type_is_json(response.headers) with logcontext.PreserveLoggingContext(): - body = yield readBody(response) + body = yield treq.json_content(response) - defer.returnValue(json.loads(body)) + defer.returnValue(body) @defer.inlineCallbacks def get_file(self, destination, path, output_stream, args={}, @@ -569,26 +572,11 @@ class MatrixFederationHttpClient(object): Fails with ``FederationDeniedError`` if this destination is not on our federation whitelist """ - - encoded_args = {} - for k, vs in args.items(): - if isinstance(vs, string_types): - vs = [vs] - encoded_args[k] = [v.encode("UTF-8") for v in vs] - - query_bytes = urllib.urlencode(encoded_args, True) - logger.debug("Query bytes: %s Retry DNS: %s", query_bytes, retry_on_dns_fail) - - def body_callback(method, url_bytes, headers_dict): - self.sign_request(destination, method, url_bytes, headers_dict) - return None - response = yield self._request( destination, "GET", path, - query_bytes=query_bytes, - body_callback=body_callback, + query=args, retry_on_dns_fail=retry_on_dns_fail, ignore_backoff=ignore_backoff, ) @@ -639,30 +627,6 @@ def _readBodyToFile(response, stream, max_size): return d -class _JsonProducer(object): - """ Used by the twisted http client to create the HTTP body from json - """ - def __init__(self, jsn): - self.reset(jsn) - - def reset(self, jsn): - self.body = encode_canonical_json(jsn) - self.length = len(self.body) - - def startProducing(self, consumer): - consumer.write(self.body) - return defer.succeed(None) - - def pauseProducing(self): - pass - - def stopProducing(self): - pass - - def resumeProducing(self): - pass - - def _flatten_response_never_received(e): if hasattr(e, "reasons"): reasons = ", ".join( @@ -693,7 +657,7 @@ def check_content_type_is_json(headers): "No Content-Type header" ) - c_type = c_type[0] # only the first header + c_type = c_type[0].decode('ascii') # only the first header val, options = cgi.parse_header(c_type) if val != "application/json": raise RuntimeError( @@ -711,6 +675,6 @@ def encode_query_args(args): vs = [vs] encoded_args[k] = [v.encode("UTF-8") for v in vs] - query_bytes = urllib.urlencode(encoded_args, True) + query_bytes = urllib.parse.urlencode(encoded_args, True) - return query_bytes + return query_bytes.encode('utf8') diff --git a/synapse/http/site.py b/synapse/http/site.py index 88ed3714f9..f0828c6542 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -204,14 +204,14 @@ class SynapseRequest(Request): self.start_time = time.time() self.request_metrics = RequestMetrics() self.request_metrics.start( - self.start_time, name=servlet_name, method=self.method, + self.start_time, name=servlet_name, method=self.method.decode('ascii'), ) self.site.access_logger.info( "%s - %s - Received request: %s %s", self.getClientIP(), self.site.site_tag, - self.method, + self.method.decode('ascii'), self.get_redacted_uri() ) diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 942d7c721f..6dd5179320 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -40,6 +40,7 @@ 
REQUIREMENTS = {
     "pynacl>=1.2.1": ["nacl>=1.2.1", "nacl.bindings"],
     "service_identity>=1.0.0": ["service_identity>=1.0.0"],
     "Twisted>=17.1.0": ["twisted>=17.1.0"],
+    "treq>=15.1": ["treq>=15.1"],
 
     # We use crypto.get_elliptic_curve which is only supported in >=0.15
     "pyopenssl>=0.15": ["OpenSSL>=0.15"],

From 7419764351aa2b2f00f363beaee6ec53c02c492e Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 5 Sep 2018 16:19:50 +0100
Subject: [PATCH 036/138] Use iter* during sync state calculations

---
 synapse/handlers/sync.py | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index ef20c2296c..0965cf2fc5 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -1729,17 +1729,17 @@ def _calculate_state(
     event_id_to_key = {
         e: key
         for key, e in itertools.chain(
-            timeline_contains.items(),
-            previous.items(),
-            timeline_start.items(),
-            current.items(),
+            iteritems(timeline_contains),
+            iteritems(previous),
+            iteritems(timeline_start),
+            iteritems(current),
         )
     }
 
-    c_ids = set(e for e in current.values())
-    ts_ids = set(e for e in timeline_start.values())
-    p_ids = set(e for e in previous.values())
-    tc_ids = set(e for e in timeline_contains.values())
+    c_ids = set(e for e in itervalues(current))
+    ts_ids = set(e for e in itervalues(timeline_start))
+    p_ids = set(e for e in itervalues(previous))
+    tc_ids = set(e for e in itervalues(timeline_contains))
 
     # If we are lazyloading room members, we explicitly add the membership events
     # for the senders in the timeline into the state block returned by /sync,

From 2254790ae458160117959b364378f1c6cf338005 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 5 Sep 2018 16:21:18 +0100
Subject: [PATCH 037/138] Newsfile

---
 changelog.d/3795.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/3795.misc

diff --git a/changelog.d/3795.misc b/changelog.d/3795.misc
new file mode 100644
index 0000000000..9f64ee5e2b
--- /dev/null
+++ b/changelog.d/3795.misc
@@ -0,0 +1 @@
+Make /sync slightly faster by avoiding needless copies

From 61b05727fa6ba879f507d13ff40c0197916ef795 Mon Sep 17 00:00:00 2001
From: Neil Johnson
Date: Wed, 5 Sep 2018 22:30:36 +0100
Subject: [PATCH 038/138] guest users should not be part of mau total

---
 synapse/storage/monthly_active_users.py    |  7 +++-
 tests/storage/test_monthly_active_users.py | 44 +++++++++++++++++++++-
 2 files changed, 48 insertions(+), 3 deletions(-)

diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py
index c7899d7fd2..e34e16c079 100644
--- a/synapse/storage/monthly_active_users.py
+++ b/synapse/storage/monthly_active_users.py
@@ -192,14 +192,19 @@ class MonthlyActiveUsersStore(SQLBaseStore):
         ))
 
     @defer.inlineCallbacks
-    def populate_monthly_active_users(self, user_id):
+    def populate_monthly_active_users(self, user_id, is_guest=False):
         """Checks on the state of monthly active user limits and optionally
         add the user to the monthly active tables
 
         Args:
             user_id(str): the user_id to query
         """
+
         if self.hs.config.limit_usage_by_mau:
+            # Guests should not be included as part of MAU group
+            if is_guest:
+                return
+
             is_trial = yield self.is_trial_user(user_id)
             if is_trial:
                 # we don't track trial users in the MAU table.
diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py index 2036287288..cfe0a7c77d 100644 --- a/tests/storage/test_monthly_active_users.py +++ b/tests/storage/test_monthly_active_users.py @@ -12,6 +12,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from mock import Mock + +from twisted.internet import defer from tests.unittest import HomeserverTestCase @@ -23,7 +26,8 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase): hs = self.setup_test_homeserver() self.store = hs.get_datastore() - + hs.config.limit_usage_by_mau = True + hs.config.max_mau_value = 50 # Advance the clock a bit reactor.advance(FORTY_DAYS) @@ -73,7 +77,7 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase): active_count = self.store.get_monthly_active_count() self.assertEquals(self.get_success(active_count), user_num) - # Test that regalar users are removed from the db + # Test that regular users are removed from the db ru_count = 2 self.store.upsert_monthly_active_user("@ru1:server") self.store.upsert_monthly_active_user("@ru2:server") @@ -139,3 +143,39 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase): count = self.store.get_monthly_active_count() self.assertEquals(self.get_success(count), 0) + + def test_populate_monthly_users_is_guest(self): + # Test that guest users are not added to mau list + self.store.upsert_monthly_active_user = Mock() + self.store.populate_monthly_active_users('user_id', True) + self.pump() + self.store.upsert_monthly_active_user.assert_not_called() + + def test_populate_monthly_users_should_update(self): + self.store.upsert_monthly_active_user = Mock() + + self.store.is_trial_user = Mock( + return_value=defer.succeed(False) + ) + + self.store.user_last_seen_monthly_active = Mock( + return_value=defer.succeed(None) + ) + self.store.populate_monthly_active_users('user_id') + self.pump() + self.store.upsert_monthly_active_user.assert_called_once() + + def test_populate_monthly_users_should_not_update(self): + self.store.upsert_monthly_active_user = Mock() + + self.store.is_trial_user = Mock( + return_value=defer.succeed(False) + ) + self.store.user_last_seen_monthly_active = Mock( + return_value=defer.succeed( + self.hs.get_clock().time_msec() + ) + ) + self.store.populate_monthly_active_users('user_id') + self.pump() + self.store.upsert_monthly_active_user.assert_not_called() From 92657be7d0e108f11f5390222df9efc9f1e55908 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 5 Sep 2018 22:33:45 +0100 Subject: [PATCH 039/138] towncrier --- changelog.d/3800.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3800.bugfix diff --git a/changelog.d/3800.bugfix b/changelog.d/3800.bugfix new file mode 100644 index 0000000000..6b2e18b4a6 --- /dev/null +++ b/changelog.d/3800.bugfix @@ -0,0 +1 @@ +guest users should not be part of mau total From 9c8cd855da4c32dc9219655d13cb86d6f9933381 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Christian=20Gr=C3=BCnhage?= Date: Thu, 6 Sep 2018 10:39:55 +0200 Subject: [PATCH 040/138] remove synctl from .dockerignore --- .dockerignore | 1 - 1 file changed, 1 deletion(-) diff --git a/.dockerignore b/.dockerignore index 6cdb8532d3..0180602e56 100644 --- a/.dockerignore +++ b/.dockerignore @@ -3,6 +3,5 @@ Dockerfile .gitignore demo/etc tox.ini -synctl .git/* .tox/* From af3125226d04f5dc9f74168ae2654a062537f075 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Jan=20Christian=20Gr=C3=BCnhage?=
Date: Thu, 6 Sep 2018 10:46:00 +0200
Subject: [PATCH 041/138] Create 3802.misc

---
 changelog.d/3802.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/3802.misc

diff --git a/changelog.d/3802.misc b/changelog.d/3802.misc
new file mode 100644
index 0000000000..a00eccb283
--- /dev/null
+++ b/changelog.d/3802.misc
@@ -0,0 +1 @@
+Unignore synctl in .dockerignore to fix docker builds

From 80189ed27c5c1b390cc9783b2931e71cccdb3c74 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Wed, 5 Sep 2018 17:03:49 +0100
Subject: [PATCH 044/138] prepare v0.33.3.1

---
 CHANGES.md            | 14 ++++++++++++++
 changelog.d/3802.misc |  1 -
 synapse/__init__.py   |  2 +-
 3 files changed, 15 insertions(+), 2 deletions(-)
 delete mode 100644 changelog.d/3802.misc

diff --git a/CHANGES.md b/CHANGES.md
index a35f5aebc7..59feb6b1a7 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,17 @@
+Synapse 0.33.3.1 (2018-09-06)
+=============================
+
+SECURITY FIXES
+--------------
+
+- Fix an issue where event signatures were not always correctly validated ([\#3796](https://github.com/matrix-org/synapse/issues/3796))
+- Fix an issue where server_acls could be circumvented for incoming events ([\#3796](https://github.com/matrix-org/synapse/issues/3796))
+
+Internal Changes
+----------------
+
+- Unignore synctl in .dockerignore to fix docker builds ([\#3802](https://github.com/matrix-org/synapse/issues/3802))
+
 Synapse 0.33.3 (2018-08-22)
 ===========================
 
diff --git a/changelog.d/3802.misc b/changelog.d/3802.misc
deleted file mode 100644
index a00eccb283..0000000000
--- a/changelog.d/3802.misc
+++ /dev/null
@@ -1 +0,0 @@
-Unignore synctl in .dockerignore to fix docker builds
diff --git a/synapse/__init__.py b/synapse/__init__.py
index e62901b761..e395a0a9ee 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -17,4 +17,4 @@
 """ This is a reference implementation of a Matrix home server.
""" -__version__ = "0.33.3" +__version__ = "0.33.3.1" From 625542878de2049a7a0b8381a75a77d09c931a82 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 6 Sep 2018 12:53:15 +0100 Subject: [PATCH 045/138] bump dep on pyopenssl to 16.x --- synapse/python_dependencies.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 6dd5179320..0d8de600cf 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -42,8 +42,8 @@ REQUIREMENTS = { "Twisted>=17.1.0": ["twisted>=17.1.0"], "treq>=15.1": ["treq>=15.1"], - # We use crypto.get_elliptic_curve which is only supported in >=0.15 - "pyopenssl>=0.15": ["OpenSSL>=0.15"], + # Twisted has required pyopenssl 16.0 since about Twisted 16.6. + "pyopenssl>=16.0.0": ["OpenSSL>=16.0.0"], "pyyaml": ["yaml"], "pyasn1": ["pyasn1"], From b3c2ebba3231255c9a2b4607e8344d7dcd3a5791 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 6 Sep 2018 12:55:17 +0100 Subject: [PATCH 046/138] changelog --- changelog.d/3804.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3804.bugfix diff --git a/changelog.d/3804.bugfix b/changelog.d/3804.bugfix new file mode 100644 index 0000000000..a0cef20e3f --- /dev/null +++ b/changelog.d/3804.bugfix @@ -0,0 +1 @@ +Bump dependency on pyopenssl 16.x, to avoid incompatibility with recent Twisted. From 417e7077aadb9f9832dabb0fa93e39e58df92a8a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 6 Sep 2018 14:08:55 +0100 Subject: [PATCH 047/138] Bump version and changelog --- CHANGES.md | 6 ++++++ synapse/__init__.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 4db47da3e3..fecfa7feec 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,9 @@ +Synapse 0.33.4rc2 (2018-09-06) +============================== + +Pull in security fixes from v0.33.3.1 + + Synapse 0.33.3.1 (2018-09-06) ============================= diff --git a/synapse/__init__.py b/synapse/__init__.py index 2c42893673..063c304534 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -17,4 +17,4 @@ """ This is a reference implementation of a Matrix home server. """ -__version__ = "0.33.4rc1" +__version__ = "0.33.4rc2" From 2608ebc04c8b7ffb3417eeb720801d70ddc7dccd Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 7 Sep 2018 00:22:23 +1000 Subject: [PATCH 048/138] Port handlers/ to Python 3 (#3803) --- .gitignore | 1 + changelog.d/3803.misc | 1 + synapse/handlers/auth.py | 8 +++++--- synapse/handlers/e2e_keys.py | 5 +++-- synapse/handlers/federation.py | 6 +++--- synapse/handlers/room_list.py | 2 +- synapse/handlers/search.py | 14 +++++++------- synapse/handlers/sync.py | 6 +++--- 8 files changed, 24 insertions(+), 19 deletions(-) create mode 100644 changelog.d/3803.misc diff --git a/.gitignore b/.gitignore index 9f42a7568f..1718185384 100644 --- a/.gitignore +++ b/.gitignore @@ -44,6 +44,7 @@ media_store/ build/ venv/ venv*/ +*venv/ localhost-800*/ static/client/register/register_config.js diff --git a/changelog.d/3803.misc b/changelog.d/3803.misc new file mode 100644 index 0000000000..2b60653c29 --- /dev/null +++ b/changelog.d/3803.misc @@ -0,0 +1 @@ +handlers/ is now ported to Python 3. diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 4a81bd2ba9..2a5eab124f 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -895,22 +895,24 @@ class AuthHandler(BaseHandler): Args: password (unicode): Password to hash. 
- stored_hash (unicode): Expected hash value. + stored_hash (bytes): Expected hash value. Returns: Deferred(bool): Whether self.hash(password) == stored_hash. """ - def _do_validate_hash(): # Normalise the Unicode in the password pw = unicodedata.normalize("NFKC", password) return bcrypt.checkpw( pw.encode('utf8') + self.hs.config.password_pepper.encode("utf8"), - stored_hash.encode('utf8') + stored_hash ) if stored_hash: + if not isinstance(stored_hash, bytes): + stored_hash = stored_hash.encode('ascii') + return make_deferred_yieldable( threads.deferToThreadPool( self.hs.get_reactor(), diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 5816bf8b4f..578e9250fb 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -330,7 +330,8 @@ class E2eKeysHandler(object): (algorithm, key_id, ex_json, key) ) else: - new_keys.append((algorithm, key_id, encode_canonical_json(key))) + new_keys.append(( + algorithm, key_id, encode_canonical_json(key).decode('ascii'))) yield self.store.add_e2e_one_time_keys( user_id, device_id, time_now, new_keys @@ -358,7 +359,7 @@ def _exception_to_failure(e): # Note that some Exceptions (notably twisted's ResponseFailed etc) don't # give a string for e.message, which json then fails to serialize. return { - "status": 503, "message": str(e.message), + "status": 503, "message": str(e), } diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 3fa7a98445..0c68e8a472 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -594,7 +594,7 @@ class FederationHandler(BaseHandler): required_auth = set( a_id - for event in events + state_events.values() + auth_events.values() + for event in events + list(state_events.values()) + list(auth_events.values()) for a_id, _ in event.auth_events ) auth_events.update({ @@ -802,7 +802,7 @@ class FederationHandler(BaseHandler): ) continue except NotRetryingDestination as e: - logger.info(e.message) + logger.info(str(e)) continue except FederationDeniedError as e: logger.info(e) @@ -1358,7 +1358,7 @@ class FederationHandler(BaseHandler): ) if state_groups: - _, state = state_groups.items().pop() + _, state = list(state_groups.items()).pop() results = state if event.is_state(): diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index 37e41afd61..38e1737ec9 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -162,7 +162,7 @@ class RoomListHandler(BaseHandler): # Filter out rooms that we don't want to return rooms_to_scan = [ r for r in sorted_rooms - if r not in newly_unpublished and rooms_to_num_joined[room_id] > 0 + if r not in newly_unpublished and rooms_to_num_joined[r] > 0 ] total_room_count = len(rooms_to_scan) diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index c464adbd0b..0c1d52fd11 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -54,7 +54,7 @@ class SearchHandler(BaseHandler): batch_token = None if batch: try: - b = decode_base64(batch) + b = decode_base64(batch).decode('ascii') batch_group, batch_group_key, batch_token = b.split("\n") assert batch_group is not None @@ -258,18 +258,18 @@ class SearchHandler(BaseHandler): # it returns more from the same group (if applicable) rather # than reverting to searching all results again. 
if batch_group and batch_group_key: - global_next_batch = encode_base64("%s\n%s\n%s" % ( + global_next_batch = encode_base64(("%s\n%s\n%s" % ( batch_group, batch_group_key, pagination_token - )) + )).encode('ascii')) else: - global_next_batch = encode_base64("%s\n%s\n%s" % ( + global_next_batch = encode_base64(("%s\n%s\n%s" % ( "all", "", pagination_token - )) + )).encode('ascii')) for room_id, group in room_groups.items(): - group["next_batch"] = encode_base64("%s\n%s\n%s" % ( + group["next_batch"] = encode_base64(("%s\n%s\n%s" % ( "room_id", room_id, pagination_token - )) + )).encode('ascii')) allowed_events.extend(room_events) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index ef20c2296c..0091ceb80e 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -545,7 +545,7 @@ class SyncHandler(object): member_ids = { state_key: event_id - for (t, state_key), event_id in state_ids.iteritems() + for (t, state_key), event_id in iteritems(state_ids) if t == EventTypes.Member } name_id = state_ids.get((EventTypes.Name, '')) @@ -774,7 +774,7 @@ class SyncHandler(object): logger.debug("filtering state from %r...", state_ids) state_ids = { t: event_id - for t, event_id in state_ids.iteritems() + for t, event_id in iteritems(state_ids) if cache.get(t[1]) != event_id } logger.debug("...to %r", state_ids) @@ -1753,7 +1753,7 @@ def _calculate_state( if lazy_load_members: p_ids.difference_update( - e for t, e in timeline_start.iteritems() + e for t, e in iteritems(timeline_start) if t[0] == EventTypes.Member ) From 6707a3212cea6849d1c16b70cc1927b16d06814a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 6 Sep 2018 15:23:55 +0100 Subject: [PATCH 049/138] Limit the number of PDUs/EDUs per fedreation transaction --- synapse/federation/transaction_queue.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py index 94d7423d01..8cbf8c4f7f 100644 --- a/synapse/federation/transaction_queue.py +++ b/synapse/federation/transaction_queue.py @@ -463,7 +463,19 @@ class TransactionQueue(object): # pending_transactions flag. 
             pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
+
+            # We can only include at most 50 PDUs per transaction
+            pending_pdus, leftover_pdus = pending_pdus[:50], pending_pdus[50:]
+            if leftover_pdus:
+                self.pending_pdus_by_dest[destination] = leftover_pdus
+
             pending_edus = self.pending_edus_by_dest.pop(destination, [])
+
+            # We can only include at most 100 EDUs per transaction
+            pending_edus, leftover_edus = pending_edus[:100], pending_edus[100:]
+            if leftover_edus:
+                self.pending_edus_by_dest[destination] = leftover_edus
+
             pending_presence = self.pending_presence_by_dest.pop(destination, {})
 
             pending_edus.extend(

From 28f5bfdcf7d806c958c850f8ac733557d485ddf6 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 6 Sep 2018 15:25:58 +0100
Subject: [PATCH 050/138] Newsfile

---
 changelog.d/3805.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/3805.misc

diff --git a/changelog.d/3805.misc b/changelog.d/3805.misc
new file mode 100644
index 0000000000..8173f6da31
--- /dev/null
+++ b/changelog.d/3805.misc
@@ -0,0 +1 @@
+Limit the number of PDUs/EDUs per fedreation transaction

From 3b4223aa234ec9f87d29a747af3e0b723d1183b1 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 6 Sep 2018 15:27:54 +0100
Subject: [PATCH 051/138] Only start postgres instance for postgres tests on Travis CI

---
 .travis.yml | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 11c76db2e5..ebc972ed24 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -8,9 +8,6 @@ before_script:
   - git remote set-branches --add origin develop
   - git fetch origin develop
 
-services:
-  - postgresql
-
 matrix:
   fast_finish: true
   include:
@@ -25,6 +22,8 @@ matrix:
 
   - python: 2.7
     env: TOX_ENV=py27-postgres TRIAL_FLAGS="-j 4"
+    services:
+      - postgresql
 
   - python: 3.6
     env: TOX_ENV=py36

From 5d848992bf986df3d7f1edd1be1a3680a9d19fb2 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 6 Sep 2018 15:28:46 +0100
Subject: [PATCH 052/138] Newsfile

---
 changelog.d/3806.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/3806.misc

diff --git a/changelog.d/3806.misc b/changelog.d/3806.misc
new file mode 100644
index 0000000000..3c722eef2d
--- /dev/null
+++ b/changelog.d/3806.misc
@@ -0,0 +1 @@
+Only start postgres instance for postgres tests on Travis CI

From b07a2cbee959d84b0f9404c4e54fda21dc0cc4e4 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 6 Sep 2018 15:53:02 +0100
Subject: [PATCH 053/138] Spelling

---
 changelog.d/3805.misc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/changelog.d/3805.misc b/changelog.d/3805.misc
index 8173f6da31..257feeb071 100644
--- a/changelog.d/3805.misc
+++ b/changelog.d/3805.misc
@@ -1 +1 @@
-Limit the number of PDUs/EDUs per fedreation transaction
+Limit the number of PDUs/EDUs per federation transaction

From 7baf66ef5d1575b40b638d4f21204e1646792612 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 6 Sep 2018 16:46:51 +0100
Subject: [PATCH 054/138] Send existing room tags down sync on join

When a user joined a room, any existing tags were not sent down the
sync stream. Ordinarily this isn't a problem, because the user needs to
be in the room to have set tags in it; however, synapse will sometimes
add tags to a room on a user's behalf (e.g. for server notice rooms),
and those tags need to come down sync.
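To illustrate, the intended behaviour is roughly the following (a
minimal sketch only: `room_account_data_for_sync` is a hypothetical
helper, and a synchronous store interface is assumed for brevity; the
real change lives in SyncHandler and goes through the usual Deferred
machinery):

    def room_account_data_for_sync(store, user_id, room_id, tags, full_state):
        # `tags` is whatever the incremental sync calculation already
        # produced. On join (or when the client requests full_state) we
        # re-read the stored tags, so that tags which synapse added
        # out-of-band (e.g. for a server notice room) are included even
        # though the user never set them in this room.
        if full_state:
            tags = store.get_tags_for_room(user_id, room_id)

        account_data_events = []
        if tags is not None:
            account_data_events.append({
                "type": "m.tag",
                "content": {"tags": tags},
            })
        return account_data_events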
--- synapse/handlers/sync.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 9f133ded3f..150d492cb0 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1575,6 +1575,14 @@ class SyncHandler(object): newly_joined_room=newly_joined, ) + # When we join the room (or the client requests full_state), we should + # send down any existing tags. Usually the user won't have tags in a + # newly joined room, unless either a) they've joined before or b) the + # tag was added by synapse e.g. for server notice rooms. + if full_state: + user_id = sync_result_builder.sync_config.user.to_string() + tags = yield self.store.get_tags_for_room(user_id, room_id) + account_data_events = [] if tags is not None: account_data_events.append({ From f60c9e2a01598c1900ab785096aa379d1fb6ac5b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 6 Sep 2018 17:01:41 +0100 Subject: [PATCH 055/138] Don't send empty tags list down sync --- synapse/handlers/sync.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 150d492cb0..7eed2fcc9b 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1583,6 +1583,11 @@ class SyncHandler(object): user_id = sync_result_builder.sync_config.user.to_string() tags = yield self.store.get_tags_for_room(user_id, room_id) + # If there aren't any tags, don't send the empty tags list down + # sync + if not tags: + tags = None + account_data_events = [] if tags is not None: account_data_events.append({ From 7298efd3619110adbf35bd1bc048d8c9a3a05949 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 6 Sep 2018 17:13:33 +0100 Subject: [PATCH 056/138] Newsfile --- changelog.d/3810.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3810.bugfix diff --git a/changelog.d/3810.bugfix b/changelog.d/3810.bugfix new file mode 100644 index 0000000000..2b938a81ae --- /dev/null +++ b/changelog.d/3810.bugfix @@ -0,0 +1 @@ +Fix existing room tags not coming down sync when joining a room From 84a750e0c36f653b71a06c564154f257c1b7dee3 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 6 Sep 2018 17:22:53 +0100 Subject: [PATCH 057/138] ensure guests never enter mau list --- synapse/storage/monthly_active_users.py | 7 +++---- tests/storage/test_client_ips.py | 4 +--- tests/storage/test_monthly_active_users.py | 6 +++++- tests/utils.py | 1 + 4 files changed, 10 insertions(+), 8 deletions(-) diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py index e34e16c079..b890c152db 100644 --- a/synapse/storage/monthly_active_users.py +++ b/synapse/storage/monthly_active_users.py @@ -192,7 +192,7 @@ class MonthlyActiveUsersStore(SQLBaseStore): )) @defer.inlineCallbacks - def populate_monthly_active_users(self, user_id, is_guest=False): + def populate_monthly_active_users(self, user_id): """Checks on the state of monthly active user limits and optionally add the user to the monthly active tables @@ -201,13 +201,12 @@ class MonthlyActiveUsersStore(SQLBaseStore): """ if self.hs.config.limit_usage_by_mau: - # Guests should not be included as part of MAU group + # Trial users and guests should not be included as part of MAU group + is_guest = yield self.is_guest(user_id) if is_guest: return - is_trial = yield self.is_trial_user(user_id) if is_trial: - # we don't track trial users in the MAU table. 
return last_seen_timestamp = yield self.user_last_seen_monthly_active(user_id) diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py index c2e88bdbaf..c9b02a062b 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py @@ -101,13 +101,11 @@ class ClientIpStoreTestCase(tests.unittest.TestCase): self.hs.config.limit_usage_by_mau = True self.hs.config.max_mau_value = 50 user_id = "@user:server" + yield self.store.register(user_id=user_id, token="123", password_hash=None) active = yield self.store.user_last_seen_monthly_active(user_id) self.assertFalse(active) - yield self.store.insert_client_ip( - user_id, "access_token", "ip", "user_agent", "device_id" - ) yield self.store.insert_client_ip( user_id, "access_token", "ip", "user_agent", "device_id" ) diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py index cfe0a7c77d..ccfc21b7fc 100644 --- a/tests/storage/test_monthly_active_users.py +++ b/tests/storage/test_monthly_active_users.py @@ -146,8 +146,12 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase): def test_populate_monthly_users_is_guest(self): # Test that guest users are not added to mau list + user_id = "user_id" + self.store.register( + user_id=user_id, token="123", password_hash=None, make_guest=True + ) self.store.upsert_monthly_active_user = Mock() - self.store.populate_monthly_active_users('user_id', True) + self.store.populate_monthly_active_users(user_id) self.pump() self.store.upsert_monthly_active_user.assert_not_called() diff --git a/tests/utils.py b/tests/utils.py index 114470d641..f24bd81bf6 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -147,6 +147,7 @@ def setup_test_homeserver( config.hs_disabled_message = "" config.hs_disabled_limit_type = "" config.max_mau_value = 50 + config.mau_trial_days = 0 config.mau_limits_reserved_threepids = [] config.admin_contact = None config.rc_messages_per_second = 10000 From 52ec6e9dfad5e088f000d20cbb4a1bbc8008c00d Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 7 Sep 2018 02:58:18 +1000 Subject: [PATCH 058/138] Port tests/ to Python 3 (#3808) --- changelog.d/3808.misc | 1 + tests/api/test_auth.py | 1 + tests/app/test_frontend_proxy.py | 4 +- tests/handlers/test_typing.py | 4 +- tests/rest/client/v1/test_rooms.py | 561 ++++++++---------- tests/rest/client/v2_alpha/test_sync.py | 8 +- tests/server.py | 2 +- .../test_resource_limits_server_notices.py | 30 +- tests/storage/test_state.py | 102 ++-- tests/test_mau.py | 30 +- tests/test_state.py | 2 +- tests/utils.py | 34 +- 12 files changed, 349 insertions(+), 430 deletions(-) create mode 100644 changelog.d/3808.misc diff --git a/changelog.d/3808.misc b/changelog.d/3808.misc new file mode 100644 index 0000000000..e5e1cd9e0e --- /dev/null +++ b/changelog.d/3808.misc @@ -0,0 +1 @@ +tests/ is now ported to Python 3. 
diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index f65a27e5f1..379e9c4ab1 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -471,6 +471,7 @@ class AuthTestCase(unittest.TestCase): def test_reserved_threepid(self): self.hs.config.limit_usage_by_mau = True self.hs.config.max_mau_value = 1 + self.store.get_monthly_active_count = lambda: defer.succeed(2) threepid = {'medium': 'email', 'address': 'reserved@server.com'} unknown_threepid = {'medium': 'email', 'address': 'unreserved@server.com'} self.hs.config.mau_limits_reserved_threepids = [threepid] diff --git a/tests/app/test_frontend_proxy.py b/tests/app/test_frontend_proxy.py index 76b5090fff..a83f567ebd 100644 --- a/tests/app/test_frontend_proxy.py +++ b/tests/app/test_frontend_proxy.py @@ -47,7 +47,7 @@ class FrontendProxyTests(HomeserverTestCase): self.assertEqual(len(self.reactor.tcpServers), 1) site = self.reactor.tcpServers[0][1] self.resource = ( - site.resource.children["_matrix"].children["client"].children["r0"] + site.resource.children[b"_matrix"].children[b"client"].children[b"r0"] ) request, channel = self.make_request("PUT", "presence/a/status") @@ -77,7 +77,7 @@ class FrontendProxyTests(HomeserverTestCase): self.assertEqual(len(self.reactor.tcpServers), 1) site = self.reactor.tcpServers[0][1] self.resource = ( - site.resource.children["_matrix"].children["client"].children["r0"] + site.resource.children[b"_matrix"].children[b"client"].children[b"r0"] ) request, channel = self.make_request("PUT", "presence/a/status") diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index c2d951b45f..36e136cded 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -43,9 +43,7 @@ def _expect_edu_transaction(edu_type, content, origin="test"): def _make_edu_transaction_json(edu_type, content): - return json.dumps(_expect_edu_transaction(edu_type, content)).encode( - 'utf8' - ) + return json.dumps(_expect_edu_transaction(edu_type, content)).encode('utf8') class TypingNotificationsTestCase(unittest.TestCase): diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index 9fe0760496..359f7777ff 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -22,39 +22,24 @@ from six.moves.urllib import parse as urlparse from twisted.internet import defer -import synapse.rest.client.v1.room from synapse.api.constants import Membership -from synapse.http.server import JsonResource -from synapse.types import UserID -from synapse.util import Clock +from synapse.rest.client.v1 import room from tests import unittest -from tests.server import ( - ThreadedMemoryReactorClock, - make_request, - render, - setup_test_homeserver, -) - -from .utils import RestHelper PATH_PREFIX = b"/_matrix/client/api/v1" -class RoomBase(unittest.TestCase): +class RoomBase(unittest.HomeserverTestCase): rmcreator_id = None - def setUp(self): + servlets = [room.register_servlets, room.register_deprecated_servlets] - self.clock = ThreadedMemoryReactorClock() - self.hs_clock = Clock(self.clock) + def make_homeserver(self, reactor, clock): - self.hs = setup_test_homeserver( - self.addCleanup, + self.hs = self.setup_test_homeserver( "red", http_client=None, - clock=self.hs_clock, - reactor=self.clock, federation_client=Mock(), ratelimiter=NonCallableMock(spec_set=["send_message"]), ) @@ -63,42 +48,21 @@ class RoomBase(unittest.TestCase): self.hs.get_federation_handler = Mock(return_value=Mock()) - def get_user_by_access_token(token=None, 
allow_guest=False): - return { - "user": UserID.from_string(self.helper.auth_user_id), - "token_id": 1, - "is_guest": False, - } - - def get_user_by_req(request, allow_guest=False, rights="access"): - return synapse.types.create_requester( - UserID.from_string(self.helper.auth_user_id), 1, False, None - ) - - self.hs.get_auth().get_user_by_req = get_user_by_req - self.hs.get_auth().get_user_by_access_token = get_user_by_access_token - self.hs.get_auth().get_access_token_from_request = Mock(return_value=b"1234") - def _insert_client_ip(*args, **kwargs): return defer.succeed(None) self.hs.get_datastore().insert_client_ip = _insert_client_ip - self.resource = JsonResource(self.hs) - synapse.rest.client.v1.room.register_servlets(self.hs, self.resource) - synapse.rest.client.v1.room.register_deprecated_servlets(self.hs, self.resource) - self.helper = RestHelper(self.hs, self.resource, self.user_id) + return self.hs class RoomPermissionsTestCase(RoomBase): """ Tests room permissions. """ - user_id = b"@sid1:red" - rmcreator_id = b"@notme:red" + user_id = "@sid1:red" + rmcreator_id = "@notme:red" - def setUp(self): - - super(RoomPermissionsTestCase, self).setUp() + def prepare(self, reactor, clock, hs): self.helper.auth_user_id = self.rmcreator_id # create some rooms under the name rmcreator_id @@ -114,22 +78,20 @@ class RoomPermissionsTestCase(RoomBase): self.created_rmid_msg_path = ( "rooms/%s/send/m.room.message/a1" % (self.created_rmid) ).encode('ascii') - request, channel = make_request( - b"PUT", - self.created_rmid_msg_path, - b'{"msgtype":"m.text","body":"test msg"}', + request, channel = self.make_request( + "PUT", self.created_rmid_msg_path, b'{"msgtype":"m.text","body":"test msg"}' ) - render(request, self.resource, self.clock) - self.assertEquals(channel.result["code"], b"200", channel.result) + self.render(request) + self.assertEquals(200, channel.code, channel.result) # set topic for public room - request, channel = make_request( - b"PUT", + request, channel = self.make_request( + "PUT", ("rooms/%s/state/m.room.topic" % self.created_public_rmid).encode('ascii'), b'{"topic":"Public Room Topic"}', ) - render(request, self.resource, self.clock) - self.assertEquals(channel.result["code"], b"200", channel.result) + self.render(request) + self.assertEquals(200, channel.code, channel.result) # auth as user_id now self.helper.auth_user_id = self.user_id @@ -140,128 +102,128 @@ class RoomPermissionsTestCase(RoomBase): seq = iter(range(100)) def send_msg_path(): - return b"/rooms/%s/send/m.room.message/mid%s" % ( + return "/rooms/%s/send/m.room.message/mid%s" % ( self.created_rmid, - str(next(seq)).encode('ascii'), + str(next(seq)), ) # send message in uncreated room, expect 403 - request, channel = make_request( - b"PUT", - b"/rooms/%s/send/m.room.message/mid2" % (self.uncreated_rmid,), + request, channel = self.make_request( + "PUT", + "/rooms/%s/send/m.room.message/mid2" % (self.uncreated_rmid,), msg_content, ) - render(request, self.resource, self.clock) - self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) + self.render(request) + self.assertEquals(403, channel.code, msg=channel.result["body"]) # send message in created room not joined (no state), expect 403 - request, channel = make_request(b"PUT", send_msg_path(), msg_content) - render(request, self.resource, self.clock) - self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", send_msg_path(), msg_content) + self.render(request) + 
self.assertEquals(403, channel.code, msg=channel.result["body"]) # send message in created room and invited, expect 403 self.helper.invite( room=self.created_rmid, src=self.rmcreator_id, targ=self.user_id ) - request, channel = make_request(b"PUT", send_msg_path(), msg_content) - render(request, self.resource, self.clock) - self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", send_msg_path(), msg_content) + self.render(request) + self.assertEquals(403, channel.code, msg=channel.result["body"]) # send message in created room and joined, expect 200 self.helper.join(room=self.created_rmid, user=self.user_id) - request, channel = make_request(b"PUT", send_msg_path(), msg_content) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", send_msg_path(), msg_content) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) # send message in created room and left, expect 403 self.helper.leave(room=self.created_rmid, user=self.user_id) - request, channel = make_request(b"PUT", send_msg_path(), msg_content) - render(request, self.resource, self.clock) - self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", send_msg_path(), msg_content) + self.render(request) + self.assertEquals(403, channel.code, msg=channel.result["body"]) def test_topic_perms(self): topic_content = b'{"topic":"My Topic Name"}' - topic_path = b"/rooms/%s/state/m.room.topic" % self.created_rmid + topic_path = "/rooms/%s/state/m.room.topic" % self.created_rmid # set/get topic in uncreated room, expect 403 - request, channel = make_request( - b"PUT", b"/rooms/%s/state/m.room.topic" % self.uncreated_rmid, topic_content + request, channel = self.make_request( + "PUT", "/rooms/%s/state/m.room.topic" % self.uncreated_rmid, topic_content ) - render(request, self.resource, self.clock) - self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) - request, channel = make_request( - b"GET", "/rooms/%s/state/m.room.topic" % self.uncreated_rmid + self.render(request) + self.assertEquals(403, channel.code, msg=channel.result["body"]) + request, channel = self.make_request( + "GET", "/rooms/%s/state/m.room.topic" % self.uncreated_rmid ) - render(request, self.resource, self.clock) - self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) + self.render(request) + self.assertEquals(403, channel.code, msg=channel.result["body"]) # set/get topic in created PRIVATE room not joined, expect 403 - request, channel = make_request(b"PUT", topic_path, topic_content) - render(request, self.resource, self.clock) - self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) - request, channel = make_request(b"GET", topic_path) - render(request, self.resource, self.clock) - self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", topic_path, topic_content) + self.render(request) + self.assertEquals(403, channel.code, msg=channel.result["body"]) + request, channel = self.make_request("GET", topic_path) + self.render(request) + self.assertEquals(403, channel.code, msg=channel.result["body"]) # set topic in created PRIVATE room and invited, expect 403 self.helper.invite( room=self.created_rmid, src=self.rmcreator_id, targ=self.user_id ) - 
request, channel = make_request(b"PUT", topic_path, topic_content) - render(request, self.resource, self.clock) - self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", topic_path, topic_content) + self.render(request) + self.assertEquals(403, channel.code, msg=channel.result["body"]) # get topic in created PRIVATE room and invited, expect 403 - request, channel = make_request(b"GET", topic_path) - render(request, self.resource, self.clock) - self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("GET", topic_path) + self.render(request) + self.assertEquals(403, channel.code, msg=channel.result["body"]) # set/get topic in created PRIVATE room and joined, expect 200 self.helper.join(room=self.created_rmid, user=self.user_id) # Only room ops can set topic by default self.helper.auth_user_id = self.rmcreator_id - request, channel = make_request(b"PUT", topic_path, topic_content) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", topic_path, topic_content) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) self.helper.auth_user_id = self.user_id - request, channel = make_request(b"GET", topic_path) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) - self.assert_dict(json.loads(topic_content), channel.json_body) + request, channel = self.make_request("GET", topic_path) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) + self.assert_dict(json.loads(topic_content.decode('utf8')), channel.json_body) # set/get topic in created PRIVATE room and left, expect 403 self.helper.leave(room=self.created_rmid, user=self.user_id) - request, channel = make_request(b"PUT", topic_path, topic_content) - render(request, self.resource, self.clock) - self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) - request, channel = make_request(b"GET", topic_path) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", topic_path, topic_content) + self.render(request) + self.assertEquals(403, channel.code, msg=channel.result["body"]) + request, channel = self.make_request("GET", topic_path) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) # get topic in PUBLIC room, not joined, expect 403 - request, channel = make_request( - b"GET", b"/rooms/%s/state/m.room.topic" % self.created_public_rmid + request, channel = self.make_request( + "GET", "/rooms/%s/state/m.room.topic" % self.created_public_rmid ) - render(request, self.resource, self.clock) - self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) + self.render(request) + self.assertEquals(403, channel.code, msg=channel.result["body"]) # set topic in PUBLIC room, not joined, expect 403 - request, channel = make_request( - b"PUT", - b"/rooms/%s/state/m.room.topic" % self.created_public_rmid, + request, channel = self.make_request( + "PUT", + "/rooms/%s/state/m.room.topic" % self.created_public_rmid, topic_content, ) - render(request, self.resource, self.clock) - self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) + self.render(request) 
+ self.assertEquals(403, channel.code, msg=channel.result["body"]) def _test_get_membership(self, room=None, members=[], expect_code=None): for member in members: - path = b"/rooms/%s/state/m.room.member/%s" % (room, member) - request, channel = make_request(b"GET", path) - render(request, self.resource, self.clock) - self.assertEquals(expect_code, int(channel.result["code"])) + path = "/rooms/%s/state/m.room.member/%s" % (room, member) + request, channel = self.make_request("GET", path) + self.render(request) + self.assertEquals(expect_code, channel.code) def test_membership_basic_room_perms(self): # === room does not exist === @@ -428,217 +390,211 @@ class RoomPermissionsTestCase(RoomBase): class RoomsMemberListTestCase(RoomBase): """ Tests /rooms/$room_id/members/list REST events.""" - user_id = b"@sid1:red" + user_id = "@sid1:red" def test_get_member_list(self): room_id = self.helper.create_room_as(self.user_id) - request, channel = make_request(b"GET", b"/rooms/%s/members" % room_id) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("GET", "/rooms/%s/members" % room_id) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) def test_get_member_list_no_room(self): - request, channel = make_request(b"GET", b"/rooms/roomdoesnotexist/members") - render(request, self.resource, self.clock) - self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("GET", "/rooms/roomdoesnotexist/members") + self.render(request) + self.assertEquals(403, channel.code, msg=channel.result["body"]) def test_get_member_list_no_permission(self): - room_id = self.helper.create_room_as(b"@some_other_guy:red") - request, channel = make_request(b"GET", b"/rooms/%s/members" % room_id) - render(request, self.resource, self.clock) - self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) + room_id = self.helper.create_room_as("@some_other_guy:red") + request, channel = self.make_request("GET", "/rooms/%s/members" % room_id) + self.render(request) + self.assertEquals(403, channel.code, msg=channel.result["body"]) def test_get_member_list_mixed_memberships(self): - room_creator = b"@some_other_guy:red" + room_creator = "@some_other_guy:red" room_id = self.helper.create_room_as(room_creator) - room_path = b"/rooms/%s/members" % room_id + room_path = "/rooms/%s/members" % room_id self.helper.invite(room=room_id, src=room_creator, targ=self.user_id) # can't see list if you're just invited. 
- request, channel = make_request(b"GET", room_path) - render(request, self.resource, self.clock) - self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("GET", room_path) + self.render(request) + self.assertEquals(403, channel.code, msg=channel.result["body"]) self.helper.join(room=room_id, user=self.user_id) # can see list now joined - request, channel = make_request(b"GET", room_path) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("GET", room_path) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) self.helper.leave(room=room_id, user=self.user_id) # can see old list once left - request, channel = make_request(b"GET", room_path) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("GET", room_path) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) class RoomsCreateTestCase(RoomBase): """ Tests /rooms and /rooms/$room_id REST events. """ - user_id = b"@sid1:red" + user_id = "@sid1:red" def test_post_room_no_keys(self): # POST with no config keys, expect new room id - request, channel = make_request(b"POST", b"/createRoom", b"{}") + request, channel = self.make_request("POST", "/createRoom", "{}") - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), channel.result) + self.render(request) + self.assertEquals(200, channel.code, channel.result) self.assertTrue("room_id" in channel.json_body) def test_post_room_visibility_key(self): # POST with visibility config key, expect new room id - request, channel = make_request( - b"POST", b"/createRoom", b'{"visibility":"private"}' + request, channel = self.make_request( + "POST", "/createRoom", b'{"visibility":"private"}' ) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"])) + self.render(request) + self.assertEquals(200, channel.code) self.assertTrue("room_id" in channel.json_body) def test_post_room_custom_key(self): # POST with custom config keys, expect new room id - request, channel = make_request(b"POST", b"/createRoom", b'{"custom":"stuff"}') - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"])) + request, channel = self.make_request( + "POST", "/createRoom", b'{"custom":"stuff"}' + ) + self.render(request) + self.assertEquals(200, channel.code) self.assertTrue("room_id" in channel.json_body) def test_post_room_known_and_unknown_keys(self): # POST with custom + known config keys, expect new room id - request, channel = make_request( - b"POST", b"/createRoom", b'{"visibility":"private","custom":"things"}' + request, channel = self.make_request( + "POST", "/createRoom", b'{"visibility":"private","custom":"things"}' ) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"])) + self.render(request) + self.assertEquals(200, channel.code) self.assertTrue("room_id" in channel.json_body) def test_post_room_invalid_content(self): # POST with invalid content / paths, expect 400 - request, channel = make_request(b"POST", b"/createRoom", b'{"visibili') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"])) + request, channel = self.make_request("POST", "/createRoom", 
b'{"visibili') + self.render(request) + self.assertEquals(400, channel.code) - request, channel = make_request(b"POST", b"/createRoom", b'["hello"]') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"])) + request, channel = self.make_request("POST", "/createRoom", b'["hello"]') + self.render(request) + self.assertEquals(400, channel.code) class RoomTopicTestCase(RoomBase): """ Tests /rooms/$room_id/topic REST events. """ - user_id = b"@sid1:red" - - def setUp(self): - - super(RoomTopicTestCase, self).setUp() + user_id = "@sid1:red" + def prepare(self, reactor, clock, hs): # create the room self.room_id = self.helper.create_room_as(self.user_id) - self.path = b"/rooms/%s/state/m.room.topic" % (self.room_id,) + self.path = "/rooms/%s/state/m.room.topic" % (self.room_id,) def test_invalid_puts(self): # missing keys or invalid json - request, channel = make_request(b"PUT", self.path, '{}') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", self.path, '{}') + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) - request, channel = make_request(b"PUT", self.path, '{"_name":"bob"}') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", self.path, '{"_name":"bo"}') + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) - request, channel = make_request(b"PUT", self.path, '{"nao') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", self.path, '{"nao') + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) - request, channel = make_request( - b"PUT", self.path, '[{"_name":"bob"},{"_name":"jill"}]' + request, channel = self.make_request( + "PUT", self.path, '[{"_name":"bo"},{"_name":"jill"}]' ) - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) - request, channel = make_request(b"PUT", self.path, 'text only') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", self.path, 'text only') + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) - request, channel = make_request(b"PUT", self.path, '') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", self.path, '') + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) # valid key, wrong type content = '{"topic":["Topic name"]}' - request, channel = make_request(b"PUT", self.path, content) - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", self.path, content) + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) def test_rooms_topic(self): # nothing should be there - request, channel = make_request(b"GET", self.path) - 
render(request, self.resource, self.clock) - self.assertEquals(404, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("GET", self.path) + self.render(request) + self.assertEquals(404, channel.code, msg=channel.result["body"]) # valid put content = '{"topic":"Topic name"}' - request, channel = make_request(b"PUT", self.path, content) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", self.path, content) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) # valid get - request, channel = make_request(b"GET", self.path) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("GET", self.path) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) self.assert_dict(json.loads(content), channel.json_body) def test_rooms_topic_with_extra_keys(self): # valid put with extra keys content = '{"topic":"Seasons","subtopic":"Summer"}' - request, channel = make_request(b"PUT", self.path, content) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", self.path, content) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) # valid get - request, channel = make_request(b"GET", self.path) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("GET", self.path) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) self.assert_dict(json.loads(content), channel.json_body) class RoomMemberStateTestCase(RoomBase): """ Tests /rooms/$room_id/members/$user_id/state REST events. 
""" - user_id = b"@sid1:red" + user_id = "@sid1:red" - def setUp(self): - - super(RoomMemberStateTestCase, self).setUp() + def prepare(self, reactor, clock, hs): self.room_id = self.helper.create_room_as(self.user_id) - def tearDown(self): - pass - def test_invalid_puts(self): path = "/rooms/%s/state/m.room.member/%s" % (self.room_id, self.user_id) # missing keys or invalid json - request, channel = make_request(b"PUT", path, '{}') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", path, '{}') + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) - request, channel = make_request(b"PUT", path, '{"_name":"bob"}') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", path, '{"_name":"bo"}') + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) - request, channel = make_request(b"PUT", path, '{"nao') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", path, '{"nao') + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) - request, channel = make_request( - b"PUT", path, b'[{"_name":"bob"},{"_name":"jill"}]' + request, channel = self.make_request( + "PUT", path, b'[{"_name":"bo"},{"_name":"jill"}]' ) - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) - request, channel = make_request(b"PUT", path, 'text only') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", path, 'text only') + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) - request, channel = make_request(b"PUT", path, '') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", path, '') + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) # valid keys, wrong types content = '{"membership":["%s","%s","%s"]}' % ( @@ -646,9 +602,9 @@ class RoomMemberStateTestCase(RoomBase): Membership.JOIN, Membership.LEAVE, ) - request, channel = make_request(b"PUT", path, content.encode('ascii')) - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", path, content.encode('ascii')) + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) def test_rooms_members_self(self): path = "/rooms/%s/state/m.room.member/%s" % ( @@ -658,13 +614,13 @@ class RoomMemberStateTestCase(RoomBase): # valid join message (NOOP since we made the room) content = '{"membership":"%s"}' % Membership.JOIN - request, channel = make_request(b"PUT", path, content.encode('ascii')) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", path, content.encode('ascii')) + self.render(request) + 
self.assertEquals(200, channel.code, msg=channel.result["body"]) - request, channel = make_request(b"GET", path, None) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("GET", path, None) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) expected_response = {"membership": Membership.JOIN} self.assertEquals(expected_response, channel.json_body) @@ -678,13 +634,13 @@ class RoomMemberStateTestCase(RoomBase): # valid invite message content = '{"membership":"%s"}' % Membership.INVITE - request, channel = make_request(b"PUT", path, content) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", path, content) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) - request, channel = make_request(b"GET", path, None) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("GET", path, None) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) self.assertEquals(json.loads(content), channel.json_body) def test_rooms_members_other_custom_keys(self): @@ -699,13 +655,13 @@ class RoomMemberStateTestCase(RoomBase): Membership.INVITE, "Join us!", ) - request, channel = make_request(b"PUT", path, content) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", path, content) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) - request, channel = make_request(b"GET", path, None) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("GET", path, None) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) self.assertEquals(json.loads(content), channel.json_body) @@ -714,60 +670,58 @@ class RoomMessagesTestCase(RoomBase): user_id = "@sid1:red" - def setUp(self): - super(RoomMessagesTestCase, self).setUp() - + def prepare(self, reactor, clock, hs): self.room_id = self.helper.create_room_as(self.user_id) def test_invalid_puts(self): path = "/rooms/%s/send/m.room.message/mid1" % (urlparse.quote(self.room_id)) # missing keys or invalid json - request, channel = make_request(b"PUT", path, '{}') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", path, b'{}') + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) - request, channel = make_request(b"PUT", path, '{"_name":"bob"}') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", path, b'{"_name":"bo"}') + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) - request, channel = make_request(b"PUT", path, '{"nao') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", path, b'{"nao') + 
self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) - request, channel = make_request( - b"PUT", path, '[{"_name":"bob"},{"_name":"jill"}]' + request, channel = self.make_request( + "PUT", path, b'[{"_name":"bo"},{"_name":"jill"}]' ) - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) - request, channel = make_request(b"PUT", path, 'text only') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", path, b'text only') + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) - request, channel = make_request(b"PUT", path, '') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", path, b'') + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) def test_rooms_messages_sent(self): path = "/rooms/%s/send/m.room.message/mid1" % (urlparse.quote(self.room_id)) - content = '{"body":"test","msgtype":{"type":"a"}}' - request, channel = make_request(b"PUT", path, content) - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + content = b'{"body":"test","msgtype":{"type":"a"}}' + request, channel = self.make_request("PUT", path, content) + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) # custom message types - content = '{"body":"test","msgtype":"test.custom.text"}' - request, channel = make_request(b"PUT", path, content) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + content = b'{"body":"test","msgtype":"test.custom.text"}' + request, channel = self.make_request("PUT", path, content) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) # m.text message type path = "/rooms/%s/send/m.room.message/mid2" % (urlparse.quote(self.room_id)) - content = '{"body":"test2","msgtype":"m.text"}' - request, channel = make_request(b"PUT", path, content) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + content = b'{"body":"test2","msgtype":"m.text"}' + request, channel = self.make_request("PUT", path, content) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) class RoomInitialSyncTestCase(RoomBase): @@ -775,16 +729,16 @@ class RoomInitialSyncTestCase(RoomBase): user_id = "@sid1:red" - def setUp(self): - super(RoomInitialSyncTestCase, self).setUp() - + def prepare(self, reactor, clock, hs): # create the room self.room_id = self.helper.create_room_as(self.user_id) def test_initial_sync(self): - request, channel = make_request(b"GET", "/rooms/%s/initialSync" % self.room_id) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"])) + request, channel = self.make_request( + "GET", "/rooms/%s/initialSync" % self.room_id + ) + self.render(request) + self.assertEquals(200, channel.code) self.assertEquals(self.room_id, channel.json_body["room_id"]) self.assertEquals("join", channel.json_body["membership"]) @@ -819,17 +773,16 @@ class 
RoomMessageListTestCase(RoomBase): user_id = "@sid1:red" - def setUp(self): - super(RoomMessageListTestCase, self).setUp() + def prepare(self, reactor, clock, hs): self.room_id = self.helper.create_room_as(self.user_id) def test_topo_token_is_accepted(self): token = "t1-0_0_0_0_0_0_0_0_0" - request, channel = make_request( - b"GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token) + request, channel = self.make_request( + "GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token) ) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"])) + self.render(request) + self.assertEquals(200, channel.code) self.assertTrue("start" in channel.json_body) self.assertEquals(token, channel.json_body['start']) self.assertTrue("chunk" in channel.json_body) @@ -837,11 +790,11 @@ class RoomMessageListTestCase(RoomBase): def test_stream_token_is_accepted_for_fwd_pagianation(self): token = "s0_0_0_0_0_0_0_0_0" - request, channel = make_request( - b"GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token) + request, channel = self.make_request( + "GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token) ) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"])) + self.render(request) + self.assertEquals(200, channel.code) self.assertTrue("start" in channel.json_body) self.assertEquals(token, channel.json_body['start']) self.assertTrue("chunk" in channel.json_body) diff --git a/tests/rest/client/v2_alpha/test_sync.py b/tests/rest/client/v2_alpha/test_sync.py index 560b1fba96..4c30c5f258 100644 --- a/tests/rest/client/v2_alpha/test_sync.py +++ b/tests/rest/client/v2_alpha/test_sync.py @@ -62,12 +62,6 @@ class FilterTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.code, 200) self.assertTrue( set( - [ - "next_batch", - "rooms", - "account_data", - "to_device", - "device_lists", - ] + ["next_batch", "rooms", "account_data", "to_device", "device_lists"] ).issubset(set(channel.json_body.keys())) ) diff --git a/tests/server.py b/tests/server.py index 615bba1b59..a2c3ca61f6 100644 --- a/tests/server.py +++ b/tests/server.py @@ -65,7 +65,7 @@ class FakeChannel(object): def getPeer(self): # We give an address so that getClientIP returns a non null entry, # causing us to record the MAU - return address.IPv4Address(b"TCP", "127.0.0.1", 3423) + return address.IPv4Address("TCP", "127.0.0.1", 3423) def getHost(self): return None diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index 5cc7fff39b..4701eedd45 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -80,12 +80,11 @@ class TestResourceLimitsServerNotices(unittest.TestCase): self._rlsn._auth.check_auth_blocking = Mock() mock_event = Mock( - type=EventTypes.Message, - content={"msgtype": ServerNoticeMsgType}, + type=EventTypes.Message, content={"msgtype": ServerNoticeMsgType} + ) + self._rlsn._store.get_events = Mock( + return_value=defer.succeed({"123": mock_event}) ) - self._rlsn._store.get_events = Mock(return_value=defer.succeed( - {"123": mock_event} - )) yield self._rlsn.maybe_send_server_notice_to_user(self.user_id) # Would be better to check the content, but once == remove blocking event @@ -99,12 +98,11 @@ class TestResourceLimitsServerNotices(unittest.TestCase): ) mock_event = Mock( - type=EventTypes.Message, - 
content={"msgtype": ServerNoticeMsgType}, + type=EventTypes.Message, content={"msgtype": ServerNoticeMsgType} + ) + self._rlsn._store.get_events = Mock( + return_value=defer.succeed({"123": mock_event}) ) - self._rlsn._store.get_events = Mock(return_value=defer.succeed( - {"123": mock_event} - )) yield self._rlsn.maybe_send_server_notice_to_user(self.user_id) self._send_notice.assert_not_called() @@ -177,13 +175,9 @@ class TestResourceLimitsServerNoticesWithRealRooms(unittest.TestCase): @defer.inlineCallbacks def test_server_notice_only_sent_once(self): - self.store.get_monthly_active_count = Mock( - return_value=1000, - ) + self.store.get_monthly_active_count = Mock(return_value=1000) - self.store.user_last_seen_monthly_active = Mock( - return_value=1000, - ) + self.store.user_last_seen_monthly_active = Mock(return_value=1000) # Call the function multiple times to ensure we only send the notice once yield self._rlsn.maybe_send_server_notice_to_user(self.user_id) @@ -193,12 +187,12 @@ class TestResourceLimitsServerNoticesWithRealRooms(unittest.TestCase): # Now lets get the last load of messages in the service notice room and # check that there is only one server notice room_id = yield self.server_notices_manager.get_notice_room_for_user( - self.user_id, + self.user_id ) token = yield self.event_source.get_current_token() events, _ = yield self.store.get_recent_events_for_room( - room_id, limit=100, end_token=token.room_key, + room_id, limit=100, end_token=token.room_key ) count = 0 diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py index d717b9f94e..b910965932 100644 --- a/tests/storage/test_state.py +++ b/tests/storage/test_state.py @@ -185,8 +185,7 @@ class StateStoreTestCase(tests.unittest.TestCase): # test _get_some_state_from_cache correctly filters out members with types=[] (state_dict, is_all) = yield self.store._get_some_state_from_cache( - self.store._state_group_cache, - group, [], filtered_types=[EventTypes.Member] + self.store._state_group_cache, group, [], filtered_types=[EventTypes.Member] ) self.assertEqual(is_all, True) @@ -200,19 +199,20 @@ class StateStoreTestCase(tests.unittest.TestCase): (state_dict, is_all) = yield self.store._get_some_state_from_cache( self.store._state_group_members_cache, - group, [], filtered_types=[EventTypes.Member] + group, + [], + filtered_types=[EventTypes.Member], ) self.assertEqual(is_all, True) - self.assertDictEqual( - {}, - state_dict, - ) + self.assertDictEqual({}, state_dict) # test _get_some_state_from_cache correctly filters in members with wildcard types (state_dict, is_all) = yield self.store._get_some_state_from_cache( self.store._state_group_cache, - group, [(EventTypes.Member, None)], filtered_types=[EventTypes.Member] + group, + [(EventTypes.Member, None)], + filtered_types=[EventTypes.Member], ) self.assertEqual(is_all, True) @@ -226,7 +226,9 @@ class StateStoreTestCase(tests.unittest.TestCase): (state_dict, is_all) = yield self.store._get_some_state_from_cache( self.store._state_group_members_cache, - group, [(EventTypes.Member, None)], filtered_types=[EventTypes.Member] + group, + [(EventTypes.Member, None)], + filtered_types=[EventTypes.Member], ) self.assertEqual(is_all, True) @@ -264,18 +266,15 @@ class StateStoreTestCase(tests.unittest.TestCase): ) self.assertEqual(is_all, True) - self.assertDictEqual( - { - (e5.type, e5.state_key): e5.event_id, - }, - state_dict, - ) + self.assertDictEqual({(e5.type, e5.state_key): e5.event_id}, state_dict) # test _get_some_state_from_cache correctly filters in 
members with specific types # and no filtered_types (state_dict, is_all) = yield self.store._get_some_state_from_cache( self.store._state_group_members_cache, - group, [(EventTypes.Member, e5.state_key)], filtered_types=None + group, + [(EventTypes.Member, e5.state_key)], + filtered_types=None, ) self.assertEqual(is_all, True) @@ -305,9 +304,7 @@ class StateStoreTestCase(tests.unittest.TestCase): key=group, value=state_dict_ids, # list fetched keys so it knows it's partial - fetched_keys=( - (e1.type, e1.state_key), - ), + fetched_keys=((e1.type, e1.state_key),), ) (is_all, known_absent, state_dict_ids) = self.store._state_group_cache.get( @@ -315,20 +312,8 @@ class StateStoreTestCase(tests.unittest.TestCase): ) self.assertEqual(is_all, False) - self.assertEqual( - known_absent, - set( - [ - (e1.type, e1.state_key), - ] - ), - ) - self.assertDictEqual( - state_dict_ids, - { - (e1.type, e1.state_key): e1.event_id, - }, - ) + self.assertEqual(known_absent, set([(e1.type, e1.state_key)])) + self.assertDictEqual(state_dict_ids, {(e1.type, e1.state_key): e1.event_id}) ############################################ # test that things work with a partial cache @@ -336,8 +321,7 @@ class StateStoreTestCase(tests.unittest.TestCase): # test _get_some_state_from_cache correctly filters out members with types=[] room_id = self.room.to_string() (state_dict, is_all) = yield self.store._get_some_state_from_cache( - self.store._state_group_cache, - group, [], filtered_types=[EventTypes.Member] + self.store._state_group_cache, group, [], filtered_types=[EventTypes.Member] ) self.assertEqual(is_all, False) @@ -346,7 +330,9 @@ class StateStoreTestCase(tests.unittest.TestCase): room_id = self.room.to_string() (state_dict, is_all) = yield self.store._get_some_state_from_cache( self.store._state_group_members_cache, - group, [], filtered_types=[EventTypes.Member] + group, + [], + filtered_types=[EventTypes.Member], ) self.assertEqual(is_all, True) @@ -355,20 +341,19 @@ class StateStoreTestCase(tests.unittest.TestCase): # test _get_some_state_from_cache correctly filters in members wildcard types (state_dict, is_all) = yield self.store._get_some_state_from_cache( self.store._state_group_cache, - group, [(EventTypes.Member, None)], filtered_types=[EventTypes.Member] + group, + [(EventTypes.Member, None)], + filtered_types=[EventTypes.Member], ) self.assertEqual(is_all, False) - self.assertDictEqual( - { - (e1.type, e1.state_key): e1.event_id, - }, - state_dict, - ) + self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict) (state_dict, is_all) = yield self.store._get_some_state_from_cache( self.store._state_group_members_cache, - group, [(EventTypes.Member, None)], filtered_types=[EventTypes.Member] + group, + [(EventTypes.Member, None)], + filtered_types=[EventTypes.Member], ) self.assertEqual(is_all, True) @@ -389,12 +374,7 @@ class StateStoreTestCase(tests.unittest.TestCase): ) self.assertEqual(is_all, False) - self.assertDictEqual( - { - (e1.type, e1.state_key): e1.event_id, - }, - state_dict, - ) + self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict) (state_dict, is_all) = yield self.store._get_some_state_from_cache( self.store._state_group_members_cache, @@ -404,18 +384,15 @@ class StateStoreTestCase(tests.unittest.TestCase): ) self.assertEqual(is_all, True) - self.assertDictEqual( - { - (e5.type, e5.state_key): e5.event_id, - }, - state_dict, - ) + self.assertDictEqual({(e5.type, e5.state_key): e5.event_id}, state_dict) # test _get_some_state_from_cache correctly filters in 
members with specific types # and no filtered_types (state_dict, is_all) = yield self.store._get_some_state_from_cache( self.store._state_group_cache, - group, [(EventTypes.Member, e5.state_key)], filtered_types=None + group, + [(EventTypes.Member, e5.state_key)], + filtered_types=None, ) self.assertEqual(is_all, False) @@ -423,13 +400,10 @@ class StateStoreTestCase(tests.unittest.TestCase): (state_dict, is_all) = yield self.store._get_some_state_from_cache( self.store._state_group_members_cache, - group, [(EventTypes.Member, e5.state_key)], filtered_types=None + group, + [(EventTypes.Member, e5.state_key)], + filtered_types=None, ) self.assertEqual(is_all, True) - self.assertDictEqual( - { - (e5.type, e5.state_key): e5.event_id, - }, - state_dict, - ) + self.assertDictEqual({(e5.type, e5.state_key): e5.event_id}, state_dict) diff --git a/tests/test_mau.py b/tests/test_mau.py index 0732615447..bdbacb8448 100644 --- a/tests/test_mau.py +++ b/tests/test_mau.py @@ -185,20 +185,20 @@ class TestMauLimit(unittest.TestCase): self.assertEqual(e.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) def create_user(self, localpart): - request_data = json.dumps({ - "username": localpart, - "password": "monkey", - "auth": {"type": LoginType.DUMMY}, - }) + request_data = json.dumps( + { + "username": localpart, + "password": "monkey", + "auth": {"type": LoginType.DUMMY}, + } + ) - request, channel = make_request(b"POST", b"/register", request_data) + request, channel = make_request("POST", "/register", request_data) render(request, self.resource, self.reactor) - if channel.result["code"] != b"200": + if channel.code != 200: raise HttpResponseException( - int(channel.result["code"]), - channel.result["reason"], - channel.result["body"], + channel.code, channel.result["reason"], channel.result["body"] ).to_synapse_error() access_token = channel.json_body["access_token"] @@ -206,12 +206,12 @@ class TestMauLimit(unittest.TestCase): return access_token def do_sync_for_user(self, token): - request, channel = make_request(b"GET", b"/sync", access_token=token) + request, channel = make_request( + "GET", "/sync", access_token=token.encode('ascii') + ) render(request, self.resource, self.reactor) - if channel.result["code"] != b"200": + if channel.code != 200: raise HttpResponseException( - int(channel.result["code"]), - channel.result["reason"], - channel.result["body"], + channel.code, channel.result["reason"], channel.result["body"] ).to_synapse_error() diff --git a/tests/test_state.py b/tests/test_state.py index 452a123c3a..e20c33322a 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -180,7 +180,7 @@ class StateTestCase(unittest.TestCase): graph = Graph( nodes={ "START": DictObj( - type=EventTypes.Create, state_key="", content={}, depth=1, + type=EventTypes.Create, state_key="", content={}, depth=1 ), "A": DictObj(type=EventTypes.Message, depth=2), "B": DictObj(type=EventTypes.Message, depth=3), diff --git a/tests/utils.py b/tests/utils.py index 91173fa45d..215226debf 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -100,8 +100,13 @@ class TestHomeServer(HomeServer): @defer.inlineCallbacks def setup_test_homeserver( - cleanup_func, name="test", datastore=None, config=None, reactor=None, - homeserverToUse=TestHomeServer, **kargs + cleanup_func, + name="test", + datastore=None, + config=None, + reactor=None, + homeserverToUse=TestHomeServer, + **kargs ): """ Setup a homeserver suitable for running tests against. 
Keyword arguments @@ -323,8 +328,7 @@ class MockHttpResource(HttpServer): @patch('twisted.web.http.Request') @defer.inlineCallbacks def trigger( - self, http_method, path, content, mock_request, - federation_auth_origin=None, + self, http_method, path, content, mock_request, federation_auth_origin=None ): """ Fire an HTTP event. @@ -357,7 +361,7 @@ class MockHttpResource(HttpServer): headers = {} if federation_auth_origin is not None: headers[b"Authorization"] = [ - b"X-Matrix origin=%s,key=,sig=" % (federation_auth_origin, ) + b"X-Matrix origin=%s,key=,sig=" % (federation_auth_origin,) ] mock_request.requestHeaders.getRawHeaders = mock_getRawHeaders(headers) @@ -577,16 +581,16 @@ def create_room(hs, room_id, creator_id): event_builder_factory = hs.get_event_builder_factory() event_creation_handler = hs.get_event_creation_handler() - builder = event_builder_factory.new({ - "type": EventTypes.Create, - "state_key": "", - "sender": creator_id, - "room_id": room_id, - "content": {}, - }) - - event, context = yield event_creation_handler.create_new_client_event( - builder + builder = event_builder_factory.new( + { + "type": EventTypes.Create, + "state_key": "", + "sender": creator_id, + "room_id": room_id, + "content": {}, + } ) + event, context = yield event_creation_handler.create_new_client_event(builder) + yield store.persist_event(event, context) From 806964b5de3e5036a897d4b5d477e7616779d019 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 6 Sep 2018 18:51:06 +0100 Subject: [PATCH 059/138] add some logging for the keyring queue why is it so damn slow? --- synapse/crypto/keyring.py | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 30e2742102..a6b31e8b5b 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -40,6 +40,7 @@ from synapse.api.errors import Codes, SynapseError from synapse.crypto.keyclient import fetch_server_key from synapse.util import logcontext, unwrapFirstError from synapse.util.logcontext import ( + LoggingContext, PreserveLoggingContext, preserve_fn, run_in_background, @@ -216,23 +217,34 @@ class Keyring(object): servers have completed. Follows the synapse rules of logcontext preservation. 
""" + loop_count = 1 while True: wait_on = [ - self.key_downloads[server_name] + (server_name, self.key_downloads[server_name]) for server_name in server_names if server_name in self.key_downloads ] - if wait_on: - with PreserveLoggingContext(): - yield defer.DeferredList(wait_on) - else: + if not wait_on: break + logger.info( + "Waiting for existing lookups for %s to complete [loop %i]", + [w[0] for w in wait_on], loop_count, + ) + with PreserveLoggingContext(): + yield defer.DeferredList((w[1] for w in wait_on)) + + loop_count += 1 + + ctx = LoggingContext.current_context() def rm(r, server_name_): - self.key_downloads.pop(server_name_, None) + with PreserveLoggingContext(ctx): + logger.debug("Releasing key lookup lock on %s", server_name_) + self.key_downloads.pop(server_name_, None) return r for server_name, deferred in server_to_deferred.items(): + logger.debug("Got key lookup lock on %s", server_name) self.key_downloads[server_name] = deferred deferred.addBoth(rm, server_name) From cd7ef4387205eaeb64b77860a82f782f91e41e93 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 6 Sep 2018 23:56:47 +0100 Subject: [PATCH 060/138] clearer logging when things fail, too --- synapse/federation/federation_base.py | 34 +++++++++++++++++++++------ synapse/storage/keys.py | 1 + 2 files changed, 28 insertions(+), 7 deletions(-) diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 5be8e66fb8..61782ae1c0 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -143,11 +143,31 @@ class FederationBase(object): def callback(_, pdu): with logcontext.PreserveLoggingContext(ctx): if not check_event_content_hash(pdu): - logger.warn( - "Event content has been tampered, redacting %s: %s", - pdu.event_id, pdu.get_pdu_json() - ) - return prune_event(pdu) + # let's try to distinguish between failures because the event was + # redacted (which are somewhat expected) vs actual ball-tampering + # incidents. + # + # This is just a heuristic, so we just assume that if the keys are + # about the same between the redacted and received events, then the + # received event was probably a redacted copy (but we then use our + # *actual* redacted copy to be on the safe side.) 
+ redacted_event = prune_event(pdu) + if ( + set(six.iterkeys(redacted_event)) == set(six.iterkeys(pdu)) and + set(six.iterkeys(redacted_event.content)) + == set(six.iterkeys(pdu.content)) + ): + logger.info( + "Event %s seems to have been redacted; using our redacted " + "copy", + pdu.event_id, + ) + else: + logger.warning( + "Event %s content has been tampered, redacting", + pdu.event_id, pdu.get_pdu_json(), + ) + return redacted_event if self.spam_checker.check_event_for_spam(pdu): logger.warn( @@ -162,8 +182,8 @@ class FederationBase(object): failure.trap(SynapseError) with logcontext.PreserveLoggingContext(ctx): logger.warn( - "Signature check failed for %s", - pdu.event_id, + "Signature check failed for %s: %s", + pdu.event_id, failure.getErrorMessage(), ) return failure diff --git a/synapse/storage/keys.py b/synapse/storage/keys.py index f547977600..a1331c1a61 100644 --- a/synapse/storage/keys.py +++ b/synapse/storage/keys.py @@ -134,6 +134,7 @@ class KeyStore(SQLBaseStore): """ key_id = "%s:%s" % (verify_key.alg, verify_key.version) + # XXX fix this to not need a lock (#3819) def _txn(txn): self._simple_upsert_txn( txn, From 6febd8e8f737d28a9fec6cd2bfef0e80bea8437c Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 7 Sep 2018 21:40:57 +1000 Subject: [PATCH 061/138] version --- synapse/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/__init__.py b/synapse/__init__.py index 063c304534..65a2b894cc 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -17,4 +17,4 @@ """ This is a reference implementation of a Matrix home server. """ -__version__ = "0.33.4rc2" +__version__ = "0.33.4" From b60749a1ec22269216a503ccd35aaf4454dc507d Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 7 Sep 2018 21:41:57 +1000 Subject: [PATCH 062/138] changelog --- CHANGES.md | 9 +++++++++ changelog.d/3802.misc | 1 - 2 files changed, 9 insertions(+), 1 deletion(-) delete mode 100644 changelog.d/3802.misc diff --git a/CHANGES.md b/CHANGES.md index fecfa7feec..ee864c3c63 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 0.33.4 (2018-09-07) +=========================== + +Internal Changes +---------------- + +- Unignore synctl in .dockerignore to fix docker builds ([\#3802](https://github.com/matrix-org/synapse/issues/3802)) + + Synapse 0.33.4rc2 (2018-09-06) ============================== diff --git a/changelog.d/3802.misc b/changelog.d/3802.misc deleted file mode 100644 index a00eccb283..0000000000 --- a/changelog.d/3802.misc +++ /dev/null @@ -1 +0,0 @@ -Unignore synctl in .dockerignore to fix docker builds From b8ad756bd0d7c42c7c241fa08f5e078561fddded Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 7 Sep 2018 14:20:54 +0100 Subject: [PATCH 063/138] Fix jwt import check This handy code attempted to check that we could import jwt, but utterly failed to check it was the right jwt. 
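For context: the dependency check's `import jwt` could resolve to synapse/config/jwt.py
itself (via Python 2's implicit relative imports) rather than to the PyJWT package, so the
check passed even when PyJWT was not installed; renaming the module removes the shadowing.
A hedged sketch of a check the shadowing would not fool — the attribute probed here is
illustrative:

    try:
        import jwt as pyjwt
        # a shadowing local config module would not expose PyJWT's API
        missing_jwt = not hasattr(pyjwt, "decode")
    except ImportError:
        missing_jwt = True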
Fixes https://github.com/matrix-org/synapse/issues/3793 --- synapse/config/homeserver.py | 2 +- synapse/config/{jwt.py => jwt_config.py} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename synapse/config/{jwt.py => jwt_config.py} (100%) diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py index 2fd9c48abf..b8d5690f2b 100644 --- a/synapse/config/homeserver.py +++ b/synapse/config/homeserver.py @@ -21,7 +21,7 @@ from .consent_config import ConsentConfig from .database import DatabaseConfig from .emailconfig import EmailConfig from .groups import GroupsConfig -from .jwt import JWTConfig +from .jwt_config import JWTConfig from .key import KeyConfig from .logger import LoggingConfig from .metrics import MetricsConfig diff --git a/synapse/config/jwt.py b/synapse/config/jwt_config.py similarity index 100% rename from synapse/config/jwt.py rename to synapse/config/jwt_config.py From edda9f5cac2b25386c42c6eabf2e5a7dae159986 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 7 Sep 2018 14:23:35 +0100 Subject: [PATCH 064/138] changelog --- changelog.d/3824.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3824.bugfix diff --git a/changelog.d/3824.bugfix b/changelog.d/3824.bugfix new file mode 100644 index 0000000000..99f199dcc6 --- /dev/null +++ b/changelog.d/3824.bugfix @@ -0,0 +1 @@ +Fix jwt import check \ No newline at end of file From 1e4c7fff5f30d195071e83190bb480f18cd94142 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 7 Sep 2018 16:37:30 +0100 Subject: [PATCH 065/138] changelog --- changelog.d/3826.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3826.misc diff --git a/changelog.d/3826.misc b/changelog.d/3826.misc new file mode 100644 index 0000000000..a4d9a012f9 --- /dev/null +++ b/changelog.d/3826.misc @@ -0,0 +1 @@ +add some logging for the keyring queue From b7e7712f07272e1612b47d9d174f0f55d0fdd247 Mon Sep 17 00:00:00 2001 From: Mathijs van Gorcum Date: Mon, 10 Sep 2018 12:21:42 +0000 Subject: [PATCH 066/138] Remove build requirements after building --- docker/Dockerfile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index 777976217d..f6da05f4ef 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -26,8 +26,9 @@ RUN cd /synapse \ && rm -rf \ setup.cfg \ setup.py \ - synapse - + synapse \ + && apk del .nacl_deps + VOLUME ["/data"] EXPOSE 8008/tcp 8448/tcp From 7bc22539ffb8375f92a1b26f6b4cdcfcfa9efd9a Mon Sep 17 00:00:00 2001 From: Krombel Date: Mon, 10 Sep 2018 14:30:08 +0200 Subject: [PATCH 067/138] fix VOIP crashes under Python 3 (#3821) --- synapse/rest/client/v1/voip.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/synapse/rest/client/v1/voip.py b/synapse/rest/client/v1/voip.py index 62f4c3d93e..53da905eea 100644 --- a/synapse/rest/client/v1/voip.py +++ b/synapse/rest/client/v1/voip.py @@ -42,7 +42,11 @@ class VoipRestServlet(ClientV1RestServlet): expiry = (self.hs.get_clock().time_msec() + userLifetime) / 1000 username = "%d:%s" % (expiry, requester.user.to_string()) - mac = hmac.new(turnSecret, msg=username, digestmod=hashlib.sha1) + mac = hmac.new( + turnSecret.encode(), + msg=username.encode(), + digestmod=hashlib.sha1 + ) # We need to use standard padded base64 encoding here # encode_base64 because we need to add the standard padding to get the # same result as the TURN server. 
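For context on the change above: Python 3's hmac.new() requires bytes for both the key and
the message, where Python 2 accepted str. A standalone sketch of the TURN shared-secret
credential scheme the servlet computes — the function and variable names here are
illustrative, not Synapse's:

    import base64
    import hashlib
    import hmac
    import time

    def turn_credentials(turn_shared_secret, matrix_user_id, lifetime_s=86400):
        expiry = int(time.time()) + lifetime_s
        username = "%d:%s" % (expiry, matrix_user_id)
        mac = hmac.new(
            turn_shared_secret.encode("utf-8"),  # bytes key, as the fix requires
            msg=username.encode("utf-8"),        # bytes message, likewise
            digestmod=hashlib.sha1,
        )
        # standard *padded* base64, so the password matches what the TURN
        # server derives from the same secret and username
        password = base64.b64encode(mac.digest()).decode("ascii")
        return username, password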
From 3572a206d30ce178c9f7a2c852f43bb86238cf0b Mon Sep 17 00:00:00 2001 From: Krombel Date: Mon, 10 Sep 2018 14:33:08 +0200 Subject: [PATCH 068/138] add changelog --- changelog.d/3835.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3835.bugfix diff --git a/changelog.d/3835.bugfix b/changelog.d/3835.bugfix new file mode 100644 index 0000000000..00dbcbc8dc --- /dev/null +++ b/changelog.d/3835.bugfix @@ -0,0 +1 @@ +fix VOIP crashes under Python 3 (#3821) From e586916cda1f3549bd9ebab2e75d3824840c76a0 Mon Sep 17 00:00:00 2001 From: Mathijs van Gorcum Date: Mon, 10 Sep 2018 14:02:42 +0000 Subject: [PATCH 069/138] Move COPY before RUN and merge RUNs --- docker/Dockerfile | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index f6da05f4ef..b9f0acccc3 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,5 +1,7 @@ FROM docker.io/python:2-alpine3.8 +COPY . /synapse + RUN apk add --no-cache --virtual .nacl_deps \ build-base \ libffi-dev \ @@ -9,12 +11,10 @@ RUN apk add --no-cache --virtual .nacl_deps \ linux-headers \ postgresql-dev \ su-exec \ - zlib-dev - -COPY . /synapse - + zlib-dev \ + # A wheel cache may be provided in ./cache for faster build -RUN cd /synapse \ + && cd /synapse \ && pip install --upgrade \ lxml \ pip \ From e957428a154ed9faf8b481947e40f74cdc65b75c Mon Sep 17 00:00:00 2001 From: Mathijs van Gorcum Date: Mon, 10 Sep 2018 14:53:05 +0000 Subject: [PATCH 070/138] Newsfile --- changelog.d/3834.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3834.misc diff --git a/changelog.d/3834.misc b/changelog.d/3834.misc new file mode 100644 index 0000000000..8902f8fba7 --- /dev/null +++ b/changelog.d/3834.misc @@ -0,0 +1 @@ +Improved Dockerfile to remove build requirements after building reducing the image size. From af10fa6536bf25630f4a290c756cd7f78e394a41 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Christian=20Gr=C3=BCnhage?= Date: Mon, 10 Sep 2018 17:35:01 +0200 Subject: [PATCH 071/138] add runtime dependencies --- docker/Dockerfile | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index b9f0acccc3..20d3fe3bd8 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -2,7 +2,7 @@ FROM docker.io/python:2-alpine3.8 COPY . /synapse -RUN apk add --no-cache --virtual .nacl_deps \ +RUN apk add --no-cache --virtual .build_deps \ build-base \ libffi-dev \ libjpeg-turbo-dev \ @@ -10,11 +10,16 @@ RUN apk add --no-cache --virtual .nacl_deps \ libxslt-dev \ linux-headers \ postgresql-dev \ - su-exec \ zlib-dev \ - -# A wheel cache may be provided in ./cache for faster build && cd /synapse \ + && apk add --no-cache --virtual .runtime_deps \ + libffi \ + libjpeg-turbo \ + libressl \ + libxslt \ + libpq \ + zlib \ + su-exec \ && pip install --upgrade \ lxml \ pip \ @@ -27,7 +32,7 @@ RUN apk add --no-cache --virtual .nacl_deps \ setup.cfg \ setup.py \ synapse \ - && apk del .nacl_deps + && apk del .build_deps VOLUME ["/data"] From 9e05c8d3090a956054b5b39347359655236caaef Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 11 Sep 2018 10:42:10 +0100 Subject: [PATCH 072/138] Change the manhole SSH key to have more bits Newer versions of openssh client refuse to connect to the old key due to its length. 
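For reference, a keypair in the two formats the PUBLIC_KEY and PRIVATE_KEY constants below
use can be produced with the `cryptography` package. A sketch assuming a reasonably recent
version of that library (older releases also require a backend argument to
generate_private_key):

    from cryptography.hazmat.primitives import serialization
    from cryptography.hazmat.primitives.asymmetric import rsa

    key = rsa.generate_private_key(public_exponent=65537, key_size=2048)

    # a single "ssh-rsa AAAA..." line, the shape PUBLIC_KEY expects
    public_key = key.public_key().public_bytes(
        serialization.Encoding.OpenSSH, serialization.PublicFormat.OpenSSH
    ).decode("ascii")

    # a PEM "BEGIN RSA PRIVATE KEY" block, the shape PRIVATE_KEY expects
    private_key = key.private_bytes(
        serialization.Encoding.PEM,
        serialization.PrivateFormat.TraditionalOpenSSL,
        serialization.NoEncryption(),
    ).decode("ascii")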
--- synapse/util/manhole.py | 44 +++++++++++++++++++++++++++++------------ 1 file changed, 31 insertions(+), 13 deletions(-) diff --git a/synapse/util/manhole.py b/synapse/util/manhole.py index 14be3c7396..8d0f2a8918 100644 --- a/synapse/util/manhole.py +++ b/synapse/util/manhole.py @@ -19,22 +19,40 @@ from twisted.conch.ssh.keys import Key from twisted.cred import checkers, portal PUBLIC_KEY = ( - "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEArzJx8OYOnJmzf4tfBEvLi8DVPrJ3/c9k2I/Az" - "64fxjHf9imyRJbixtQhlH9lfNjUIx+4LmrJH5QNRsFporcHDKOTwTTYLh5KmRpslkYHRivcJS" - "kbh/C+BR3utDS555mV" + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDHhGATaW4KhE23+7nrH4jFx3yLq9OjaEs5" + "XALqeK+7385NlLja3DE/DO9mGhnd9+bAy39EKT3sTV6+WXQ4yD0TvEEyUEMtjWkSEm6U32+C" + "DaS3TW/vPBUMeJQwq+Ydcif1UlnpXrDDTamD0AU9VaEvHq+3HAkipqn0TGpKON6aqk4vauDx" + "oXSsV5TXBVrxP/y7HpMOpU4GUWsaaacBTKKNnUaQB4UflvydaPJUuwdaCUJGTMjbhWrjVfK+" + "jslseSPxU6XvrkZMyCr4znxvuDxjMk1RGIdO7v+rbBMLEgqtSMNqJbYeVCnj2CFgc3fcTcld" + "X2uOJDrJb/WRlHulthCh" ) PRIVATE_KEY = """-----BEGIN RSA PRIVATE KEY----- -MIIByAIBAAJhAK8ycfDmDpyZs3+LXwRLy4vA1T6yd/3PZNiPwM+uH8Yx3/YpskSW -4sbUIZR/ZXzY1CMfuC5qyR+UDUbBaaK3Bwyjk8E02C4eSpkabJZGB0Yr3CUpG4fw -vgUd7rQ0ueeZlQIBIwJgbh+1VZfr7WftK5lu7MHtqE1S1vPWZQYE3+VUn8yJADyb -Z4fsZaCrzW9lkIqXkE3GIY+ojdhZhkO1gbG0118sIgphwSWKRxK0mvh6ERxKqIt1 -xJEJO74EykXZV4oNJ8sjAjEA3J9r2ZghVhGN6V8DnQrTk24Td0E8hU8AcP0FVP+8 -PQm/g/aXf2QQkQT+omdHVEJrAjEAy0pL0EBH6EVS98evDCBtQw22OZT52qXlAwZ2 -gyTriKFVoqjeEjt3SZKKqXHSApP/AjBLpF99zcJJZRq2abgYlf9lv1chkrWqDHUu -DZttmYJeEfiFBBavVYIF1dOlZT0G8jMCMBc7sOSZodFnAiryP+Qg9otSBjJ3bQML -pSTqy7c3a2AScC/YyOwkDaICHnnD3XyjMwIxALRzl0tQEKMXs6hH8ToUdlLROCrP -EhQ0wahUTCk1gKA4uPD6TMTChavbh4K63OvbKg== +MIIEpQIBAAKCAQEAx4RgE2luCoRNt/u56x+Ixcd8i6vTo2hLOVwC6nivu9/OTZS4 +2twxPwzvZhoZ3ffmwMt/RCk97E1evll0OMg9E7xBMlBDLY1pEhJulN9vgg2kt01v +7zwVDHiUMKvmHXIn9VJZ6V6ww02pg9AFPVWhLx6vtxwJIqap9ExqSjjemqpOL2rg +8aF0rFeU1wVa8T/8ux6TDqVOBlFrGmmnAUyijZ1GkAeFH5b8nWjyVLsHWglCRkzI +24Vq41Xyvo7JbHkj8VOl765GTMgq+M58b7g8YzJNURiHTu7/q2wTCxIKrUjDaiW2 +HlQp49ghYHN33E3JXV9rjiQ6yW/1kZR7pbYQoQIDAQABAoIBAQC8KJ0q8Wzzwh5B +esa1dQHZ8+4DEsL/Amae66VcVwD0X3cCN1W2IZ7X5W0Ij2kBqr8V51RYhcR+S+Ek +BtzSiBUBvbKGrqcMGKaUgomDIMzai99hd0gvCCyZnEW1OQhFkNkaRNXCfqiZJ27M +fqvSUiU2eOwh9fCvmxoA6Of8o3FbzcJ+1GMcobWRllDtLmj6lgVbDzuA+0jC5daB +9Tj1pBzu3wn3ufxiS+gBnJ+7NcXH3E73lqCcPa2ufbZ1haxfiGCnRIhFXuQDgxFX +vKdEfDgtvas6r1ahGbc+b/q8E8fZT7cABuIU4yfOORK+MhpyWbvoyyzuVGKj3PKt +KSPJu5CZAoGBAOkoJfAVyYteqKcmGTanGqQnAY43CaYf6GdSPX/jg+JmKZg0zqMC +jWZUtPb93i+jnOInbrnuHOiHAxI8wmhEPed28H2lC/LU8PzlqFkZXKFZ4vLOhhRB +/HeHCFIDosPFlohWi3b+GAjD7sXgnIuGmnXWe2ea/TS3yersifDEoKKjAoGBANsQ +gJX2cJv1c3jhdgcs8vAt5zIOKcCLTOr/QPmVf/kxjNgndswcKHwsxE/voTO9q+TF +v/6yCSTxAdjuKz1oIYWgi/dZo82bBKWxNRpgrGviU3/zwxiHlyIXUhzQu78q3VS/ +7S1XVbc7qMV++XkYKHPVD+nVG/gGzFxumX7MLXfrAoGBAJit9cn2OnjNj9uFE1W6 +r7N254ndeLAUjPe73xH0RtTm2a4WRopwjW/JYIetTuYbWgyujc+robqTTuuOZjAp +H/CG7o0Ym251CypQqaFO/l2aowclPp/dZhpPjp9GSjuxFBZLtiBB3DNBOwbRQzIK +/vLTdRQvZkgzYkI4i0vjNt3JAoGBANP8HSKBLymMlShlrSx2b8TB9tc2Y2riohVJ +2ttqs0M2kt/dGJWdrgOz4mikL+983Olt/0P9juHDoxEEMK2kpcPEv40lnmBpYU7h +s8yJvnBLvJe2EJYdJ8AipyAhUX1FgpbvfxmASP8eaUxsegeXvBWTGWojAoS6N2o+ +0KSl+l3vAoGAFqm0gO9f/Q1Se60YQd4l2PZeMnJFv0slpgHHUwegmd6wJhOD7zJ1 +CkZcXwiv7Nog7AI9qKJEUXLjoqL+vJskBzSOqU3tcd670YQMi1aXSXJqYE202K7o +EddTrx3TNpr1D5m/f+6mnXWrc8u9y1+GNx9yz889xMjIBTBI9KqaaOs= -----END RSA PRIVATE KEY-----""" From 9a68778ac20f35ccd3aeb8b1392518a1f802955b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 11 Sep 2018 10:44:40 +0100 Subject: [PATCH 073/138] Newsfile --- changelog.d/3841.bugfix | 1 + 1 file 
changed, 1 insertion(+) create mode 100644 changelog.d/3841.bugfix diff --git a/changelog.d/3841.bugfix b/changelog.d/3841.bugfix new file mode 100644 index 0000000000..2a48a7dd66 --- /dev/null +++ b/changelog.d/3841.bugfix @@ -0,0 +1 @@ +Fix manhole so that it works with latest openssh clients From b041115415191f8353177f7e0c1beec81be0c921 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 12 Sep 2018 00:50:39 +0100 Subject: [PATCH 074/138] Speed up lazy loading (#3827) * speed up room summaries by pulling their data from room_memberships rather than room state * disable LL for incr syncs, and log incr sync stats (#3840) --- changelog.d/3827.misc | 1 + changelog.d/3840.misc | 1 + synapse/handlers/sync.py | 118 +++++++++++++++++++++++++--------- synapse/storage/events.py | 4 ++ synapse/storage/roommember.py | 65 +++++++++++++++++++ 5 files changed, 159 insertions(+), 30 deletions(-) create mode 100644 changelog.d/3827.misc create mode 100644 changelog.d/3840.misc diff --git a/changelog.d/3827.misc b/changelog.d/3827.misc new file mode 100644 index 0000000000..bc294706cf --- /dev/null +++ b/changelog.d/3827.misc @@ -0,0 +1 @@ +speed up lazy loading by 2-3x diff --git a/changelog.d/3840.misc b/changelog.d/3840.misc new file mode 100644 index 0000000000..b9585ae9be --- /dev/null +++ b/changelog.d/3840.misc @@ -0,0 +1 @@ +Disable lazy loading for incremental syncs for now diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 7eed2fcc9b..23983a51ab 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -24,6 +24,7 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, Membership from synapse.push.clientformat import format_push_rules_for_user +from synapse.storage.roommember import MemberSummary from synapse.types import RoomStreamToken from synapse.util.async_helpers import concurrently_execute from synapse.util.caches.expiringcache import ExpiringCache @@ -525,6 +526,8 @@ class SyncHandler(object): A deferred dict describing the room summary """ + # FIXME: we could/should get this from room_stats when matthew/stats lands + # FIXME: this promulgates https://github.com/matrix-org/synapse/issues/3305 last_events, _ = yield self.store.get_recent_event_ids_for_room( room_id, end_token=now_token.room_key, limit=1, @@ -537,44 +540,54 @@ class SyncHandler(object): last_event = last_events[-1] state_ids = yield self.store.get_state_ids_for_event( last_event.event_id, [ - (EventTypes.Member, None), (EventTypes.Name, ''), (EventTypes.CanonicalAlias, ''), ] ) - member_ids = { - state_key: event_id - for (t, state_key), event_id in iteritems(state_ids) - if t == EventTypes.Member - } + # this is heavily cached, thus: fast. + details = yield self.store.get_room_summary(room_id) + name_id = state_ids.get((EventTypes.Name, '')) canonical_alias_id = state_ids.get((EventTypes.CanonicalAlias, '')) summary = {} - - # FIXME: it feels very heavy to load up every single membership event - # just to calculate the counts. - member_events = yield self.store.get_events(member_ids.values()) - - joined_user_ids = [] - invited_user_ids = [] - - for ev in member_events.values(): - if ev.content.get("membership") == Membership.JOIN: - joined_user_ids.append(ev.state_key) - elif ev.content.get("membership") == Membership.INVITE: - invited_user_ids.append(ev.state_key) + empty_ms = MemberSummary([], 0) # TODO: only send these when they change. 
- summary["m.joined_member_count"] = len(joined_user_ids) - summary["m.invited_member_count"] = len(invited_user_ids) + summary["m.joined_member_count"] = ( + details.get(Membership.JOIN, empty_ms).count + ) + summary["m.invited_member_count"] = ( + details.get(Membership.INVITE, empty_ms).count + ) if name_id or canonical_alias_id: defer.returnValue(summary) - # FIXME: order by stream ordering, not alphabetic + joined_user_ids = [ + r[0] for r in details.get(Membership.JOIN, empty_ms).members + ] + invited_user_ids = [ + r[0] for r in details.get(Membership.INVITE, empty_ms).members + ] + gone_user_ids = ( + [r[0] for r in details.get(Membership.LEAVE, empty_ms).members] + + [r[0] for r in details.get(Membership.BAN, empty_ms).members] + ) + # FIXME: only build up a member_ids list for our heroes + member_ids = {} + for membership in ( + Membership.JOIN, + Membership.INVITE, + Membership.LEAVE, + Membership.BAN + ): + for user_id, event_id in details.get(membership, empty_ms).members: + member_ids[user_id] = event_id + + # FIXME: order by stream ordering rather than as returned by SQL me = sync_config.user.to_string() if (joined_user_ids or invited_user_ids): summary['m.heroes'] = sorted( @@ -586,7 +599,11 @@ class SyncHandler(object): )[0:5] else: summary['m.heroes'] = sorted( - [user_id for user_id in member_ids.keys() if user_id != me] + [ + user_id + for user_id in gone_user_ids + if user_id != me + ] )[0:5] if not sync_config.filter_collection.lazy_load_members(): @@ -719,6 +736,26 @@ class SyncHandler(object): lazy_load_members=lazy_load_members, ) elif batch.limited: + state_at_timeline_start = yield self.store.get_state_ids_for_event( + batch.events[0].event_id, types=types, + filtered_types=filtered_types, + ) + + # for now, we disable LL for gappy syncs - see + # https://github.com/vector-im/riot-web/issues/7211#issuecomment-419976346 + # N.B. this slows down incr syncs as we are now processing way + # more state in the server than if we were LLing. + # + # We still have to filter timeline_start to LL entries (above) in order + # for _calculate_state's LL logic to work, as we have to include LL + # members for timeline senders in case they weren't loaded in the initial + # sync. We do this by (counterintuitively) by filtering timeline_start + # members to just be ones which were timeline senders, which then ensures + # all of the rest get included in the state block (if we need to know + # about them). + types = None + filtered_types = None + state_at_previous_sync = yield self.get_state_at( room_id, stream_position=since_token, types=types, filtered_types=filtered_types, @@ -729,24 +766,21 @@ class SyncHandler(object): filtered_types=filtered_types, ) - state_at_timeline_start = yield self.store.get_state_ids_for_event( - batch.events[0].event_id, types=types, - filtered_types=filtered_types, - ) - state_ids = _calculate_state( timeline_contains=timeline_state, timeline_start=state_at_timeline_start, previous=state_at_previous_sync, current=current_state_ids, + # we have to include LL members in case LL initial sync missed them lazy_load_members=lazy_load_members, ) else: state_ids = {} if lazy_load_members: if types: - # We're returning an incremental sync, with no "gap" since - # the previous sync, so normally there would be no state to return + # We're returning an incremental sync, with no + # "gap" since the previous sync, so normally there would be + # no state to return. 
# But we're lazy-loading, so the client might need some more # member events to understand the events in this timeline. # So we fish out all the member events corresponding to the @@ -1616,10 +1650,24 @@ class SyncHandler(object): ) summary = {} + + # we include a summary in room responses when we're lazy loading + # members (as the client otherwise doesn't have enough info to form + # the name itself). if ( sync_config.filter_collection.lazy_load_members() and ( + # we recalulate the summary: + # if there are membership changes in the timeline, or + # if membership has changed during a gappy sync, or + # if this is an initial sync. any(ev.type == EventTypes.Member for ev in batch.events) or + ( + # XXX: this may include false positives in the form of LL + # members which have snuck into state + batch.limited and + any(t == EventTypes.Member for (t, k) in state) + ) or since_token is None ) ): @@ -1649,6 +1697,16 @@ class SyncHandler(object): unread_notifications["highlight_count"] = notifs["highlight_count"] sync_result_builder.joined.append(room_sync) + + if batch.limited: + user_id = sync_result_builder.sync_config.user.to_string() + logger.info( + "Incremental syncing room %s for user %s with %d state events" % ( + room_id, + user_id, + len(state), + ) + ) elif room_builder.rtype == "archived": room_sync = ArchivedSyncResult( room_id=room_id, diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 8bf87f38f7..30ff87a4c4 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -929,6 +929,10 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore txn, self.get_users_in_room, (room_id,) ) + self._invalidate_cache_and_stream( + txn, self.get_room_summary, (room_id,) + ) + self._invalidate_cache_and_stream( txn, self.get_current_state_ids, (room_id,) ) diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 9b4e6d6aa8..0707f9a86a 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -51,6 +51,12 @@ ProfileInfo = namedtuple( "ProfileInfo", ("avatar_url", "display_name") ) +# "members" points to a truncated list of (user_id, event_id) tuples for users of +# a given membership type, suitable for use in calculating heroes for a room. +# "count" points to the total numberr of users of a given membership type. +MemberSummary = namedtuple( + "MemberSummary", ("members", "count") +) _MEMBERSHIP_PROFILE_UPDATE_NAME = "room_membership_profile_update" @@ -82,6 +88,65 @@ class RoomMemberWorkerStore(EventsWorkerStore): return [to_ascii(r[0]) for r in txn] return self.runInteraction("get_users_in_room", f) + @cached(max_entries=100000) + def get_room_summary(self, room_id): + """ Get the details of a room roughly suitable for use by the room + summary extension to /sync. Useful when lazy loading room members. + Args: + room_id (str): The room ID to query + Returns: + Deferred[dict[str, MemberSummary]: + dict of membership states, pointing to a MemberSummary named tuple. + """ + + def _get_room_summary_txn(txn): + # first get counts. + # We do this all in one transaction to keep the cache small. + # FIXME: get rid of this when we have room_stats + sql = """ + SELECT count(*), m.membership FROM room_memberships as m + INNER JOIN current_state_events as c + ON m.event_id = c.event_id + AND m.room_id = c.room_id + AND m.user_id = c.state_key + WHERE c.type = 'm.room.member' AND c.room_id = ? 
+ GROUP BY m.membership + """ + + txn.execute(sql, (room_id,)) + res = {} + for count, membership in txn: + summary = res.setdefault(to_ascii(membership), MemberSummary([], count)) + + # we order by membership and then fairly arbitrarily by event_id so + # heroes are consistent + sql = """ + SELECT m.user_id, m.membership, m.event_id + FROM room_memberships as m + INNER JOIN current_state_events as c + ON m.event_id = c.event_id + AND m.room_id = c.room_id + AND m.user_id = c.state_key + WHERE c.type = 'm.room.member' AND c.room_id = ? + ORDER BY + CASE m.membership WHEN ? THEN 1 WHEN ? THEN 2 ELSE 3 END ASC, + m.event_id ASC + LIMIT ? + """ + + # 6 is 5 (number of heroes) plus 1, in case one of them is the calling user. + txn.execute(sql, (room_id, Membership.JOIN, Membership.INVITE, 6)) + for user_id, membership, event_id in txn: + summary = res[to_ascii(membership)] + # we will always have a summary for this membership type at this + # point given the summary currently contains the counts. + members = summary.members + members.append((to_ascii(user_id), to_ascii(event_id))) + + return res + + return self.runInteraction("get_room_summary", _get_room_summary_txn) + @cached() def get_invited_rooms_for_user(self, user_id): """ Get all the rooms the user is invited to From 4084a774a89f0d02406eebda8279c2b8aab89812 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 12 Sep 2018 09:59:15 +0100 Subject: [PATCH 075/138] Timeout reading body for outbound HTTP requests --- synapse/http/matrixfederationclient.py | 52 ++++++++++++++++++++++---- 1 file changed, 45 insertions(+), 7 deletions(-) diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 6a1fc8ca55..f9a1fbf95d 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -280,7 +280,10 @@ class MatrixFederationHttpClient(object): # :'( # Update transactions table? 
with logcontext.PreserveLoggingContext(): - body = yield treq.content(response) + body = yield self._timeout_deferred( + treq.content(response), + timeout, + ) raise HttpResponseException( response.code, response.phrase, body ) @@ -394,7 +397,10 @@ class MatrixFederationHttpClient(object): check_content_type_is_json(response.headers) with logcontext.PreserveLoggingContext(): - body = yield treq.json_content(response) + body = yield self._timeout_deferred( + treq.json_content(response), + timeout, + ) defer.returnValue(body) @defer.inlineCallbacks @@ -444,7 +450,10 @@ class MatrixFederationHttpClient(object): check_content_type_is_json(response.headers) with logcontext.PreserveLoggingContext(): - body = yield treq.json_content(response) + body = yield self._timeout_deferred( + treq.json_content(response), + timeout, + ) defer.returnValue(body) @@ -496,7 +505,10 @@ class MatrixFederationHttpClient(object): check_content_type_is_json(response.headers) with logcontext.PreserveLoggingContext(): - body = yield treq.json_content(response) + body = yield self._timeout_deferred( + treq.json_content(response), + timeout, + ) defer.returnValue(body) @@ -543,7 +555,10 @@ class MatrixFederationHttpClient(object): check_content_type_is_json(response.headers) with logcontext.PreserveLoggingContext(): - body = yield treq.json_content(response) + body = yield self._timeout_deferred( + treq.json_content(response), + timeout, + ) defer.returnValue(body) @@ -585,8 +600,10 @@ class MatrixFederationHttpClient(object): try: with logcontext.PreserveLoggingContext(): - length = yield _readBodyToFile( - response, output_stream, max_size + length = yield self._timeout_deferred( + _readBodyToFile( + response, output_stream, max_size + ), ) except Exception: logger.exception("Failed to download body") @@ -594,6 +611,27 @@ class MatrixFederationHttpClient(object): defer.returnValue((length, headers)) + def _timeout_deferred(self, deferred, timeout_ms=None): + """Times the deferred out after `timeout_ms` ms + + Args: + deferred (Deferred) + timeout_ms (int|None): Timeout in milliseconds. If None defaults + to 60 seconds. + + Returns: + Deferred + """ + + add_timeout_to_deferred( + deferred, + timeout_ms / 1000. if timeout_ms else 60, + self.hs.get_reactor(), + cancelled_to_request_timed_out_error, + ) + + return deferred + class _ReadBodyToFileProtocol(protocol.Protocol): def __init__(self, stream, deferred, max_size): From 649c647955dee037e9f7d0d0d81341cac020a901 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 12 Sep 2018 10:10:32 +0100 Subject: [PATCH 076/138] Newsfile --- changelog.d/3845.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3845.bugfix diff --git a/changelog.d/3845.bugfix b/changelog.d/3845.bugfix new file mode 100644 index 0000000000..5b7e8f1934 --- /dev/null +++ b/changelog.d/3845.bugfix @@ -0,0 +1 @@ +Fix outbound requests occasionally wedging, which can result in federation breaking between servers. 
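The fix above works by racing each body read against a timer, so a remote server that stops
sending mid-body can no longer hold the request open indefinitely. A generic sketch of the
underlying pattern (Twisted's built-in Deferred.addTimeout is similar; Synapse's
add_timeout_to_deferred additionally maps the cancellation to a request-timed-out error):

    def timeout_deferred(reactor, deferred, timeout_s):
        """Cancel `deferred` if it has not fired within `timeout_s` seconds."""
        delayed_call = reactor.callLater(timeout_s, deferred.cancel)

        def stop_timer(result):
            # the read completed (or failed) first; disarm the timer
            if delayed_call.active():
                delayed_call.cancel()
            return result

        deferred.addBoth(stop_timer)
        return deferred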
From 8fd93b5eeaeddce16e0b510741dc5d4768cbc78d Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 12 Sep 2018 20:16:31 +1000 Subject: [PATCH 077/138] Port crypto/ to Python 3 (#3822) --- changelog.d/3822.misc | 1 + synapse/crypto/context_factory.py | 2 +- synapse/crypto/keyclient.py | 8 +++++++- synapse/crypto/keyring.py | 9 +++++---- 4 files changed, 14 insertions(+), 6 deletions(-) create mode 100644 changelog.d/3822.misc diff --git a/changelog.d/3822.misc b/changelog.d/3822.misc new file mode 100644 index 0000000000..5250f31896 --- /dev/null +++ b/changelog.d/3822.misc @@ -0,0 +1 @@ +crypto/ is now ported to Python 3. diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py index 1a391adec1..02b76dfcfb 100644 --- a/synapse/crypto/context_factory.py +++ b/synapse/crypto/context_factory.py @@ -123,6 +123,6 @@ class ClientTLSOptionsFactory(object): def get_options(self, host): return ClientTLSOptions( - host.decode('utf-8'), + host, CertificateOptions(verify=False).getContext() ) diff --git a/synapse/crypto/keyclient.py b/synapse/crypto/keyclient.py index e94400b8e2..57d4665e84 100644 --- a/synapse/crypto/keyclient.py +++ b/synapse/crypto/keyclient.py @@ -50,7 +50,7 @@ def fetch_server_key(server_name, tls_client_options_factory, path=KEY_API_V1): defer.returnValue((server_response, server_certificate)) except SynapseKeyClientError as e: logger.warn("Error getting key for %r: %s", server_name, e) - if e.status.startswith("4"): + if e.status.startswith(b"4"): # Don't retry for 4xx responses. raise IOError("Cannot get key for %r" % server_name) except (ConnectError, DomainError) as e: @@ -82,6 +82,12 @@ class SynapseKeyClientProtocol(HTTPClient): self._peer = self.transport.getPeer() logger.debug("Connected to %s", self._peer) + if not isinstance(self.path, bytes): + self.path = self.path.encode('ascii') + + if not isinstance(self.host, bytes): + self.host = self.host.encode('ascii') + self.sendCommand(b"GET", self.path) if self.host: self.sendHeader(b"Host", self.host) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 30e2742102..9d497abf17 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -16,9 +16,10 @@ import hashlib import logging -import urllib from collections import namedtuple +from six.moves import urllib + from signedjson.key import ( decode_verify_key_bytes, encode_verify_key_base64, @@ -432,7 +433,7 @@ class Keyring(object): # an incoming request. 
query_response = yield self.client.post_json( destination=perspective_name, - path=b"/_matrix/key/v2/query", + path="/_matrix/key/v2/query", data={ u"server_keys": { server_name: { @@ -513,8 +514,8 @@ class Keyring(object): (response, tls_certificate) = yield fetch_server_key( server_name, self.hs.tls_client_options_factory, - path=(b"/_matrix/key/v2/server/%s" % ( - urllib.quote(requested_key_id), + path=("/_matrix/key/v2/server/%s" % ( + urllib.parse.quote(requested_key_id), )).encode("ascii"), ) From 02aa41809bd48cc1dd02da3634f02f5eae71b41c Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 12 Sep 2018 20:41:31 +1000 Subject: [PATCH 078/138] Port rest/ to Python 3 (#3823) --- changelog.d/3823.misc | 1 + synapse/rest/client/v1/admin.py | 9 ++-- synapse/rest/client/v1/events.py | 12 ++--- synapse/rest/client/v1/initial_sync.py | 2 +- synapse/rest/client/v1/login.py | 44 +++++++++---------- synapse/rest/client/v1/push_rule.py | 24 +++++----- synapse/rest/client/v1/pusher.py | 4 +- synapse/rest/client/v1/room.py | 14 +++--- synapse/rest/client/v2_alpha/sync.py | 2 +- synapse/rest/client/v2_alpha/thirdparty.py | 4 +- synapse/rest/key/v1/server_key_resource.py | 2 +- synapse/rest/key/v2/__init__.py | 4 +- synapse/rest/key/v2/remote_key_resource.py | 6 ++- synapse/rest/media/v0/content_repository.py | 4 +- synapse/rest/media/v1/_base.py | 30 ++++++------- synapse/rest/media/v1/download_resource.py | 12 ++--- synapse/rest/media/v1/media_repository.py | 29 +++++++----- synapse/rest/media/v1/preview_url_resource.py | 10 ++--- 18 files changed, 113 insertions(+), 100 deletions(-) create mode 100644 changelog.d/3823.misc diff --git a/changelog.d/3823.misc b/changelog.d/3823.misc new file mode 100644 index 0000000000..0da491ddaa --- /dev/null +++ b/changelog.d/3823.misc @@ -0,0 +1 @@ +rest/ is now ported to Python 3. 
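Most of the churn in this patch is one recurring Python 3 difference: twisted.web's
request.args maps bytes keys to lists of bytes values, so str-keyed lookups such as
request.args["room_id"] silently find nothing. An illustrative helper capturing the pattern
(Synapse's own servlet helpers, e.g. the parse_boolean used in the initial_sync changes
below, are the robust way to do this):

    def get_arg(request, name, default=None):
        # request.args is {bytes: [bytes]} under Python 3
        values = request.args.get(name.encode("ascii"))
        if not values:
            return default
        return values[0].decode("ascii")

    # e.g. room_id = get_arg(request, "room_id")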
diff --git a/synapse/rest/client/v1/admin.py b/synapse/rest/client/v1/admin.py index ad536ab570..41534b8c2a 100644 --- a/synapse/rest/client/v1/admin.py +++ b/synapse/rest/client/v1/admin.py @@ -101,7 +101,7 @@ class UserRegisterServlet(ClientV1RestServlet): nonce = self.hs.get_secrets().token_hex(64) self.nonces[nonce] = int(self.reactor.seconds()) - return (200, {"nonce": nonce.encode('ascii')}) + return (200, {"nonce": nonce}) @defer.inlineCallbacks def on_POST(self, request): @@ -164,7 +164,7 @@ class UserRegisterServlet(ClientV1RestServlet): key=self.hs.config.registration_shared_secret.encode(), digestmod=hashlib.sha1, ) - want_mac.update(nonce) + want_mac.update(nonce.encode('utf8')) want_mac.update(b"\x00") want_mac.update(username) want_mac.update(b"\x00") @@ -173,7 +173,10 @@ class UserRegisterServlet(ClientV1RestServlet): want_mac.update(b"admin" if admin else b"notadmin") want_mac = want_mac.hexdigest() - if not hmac.compare_digest(want_mac, got_mac.encode('ascii')): + if not hmac.compare_digest( + want_mac.encode('ascii'), + got_mac.encode('ascii') + ): raise SynapseError(403, "HMAC incorrect") # Reuse the parts of RegisterRestServlet to reduce code duplication diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py index 0f3a2e8b51..cd9b3bdbd1 100644 --- a/synapse/rest/client/v1/events.py +++ b/synapse/rest/client/v1/events.py @@ -45,20 +45,20 @@ class EventStreamRestServlet(ClientV1RestServlet): is_guest = requester.is_guest room_id = None if is_guest: - if "room_id" not in request.args: + if b"room_id" not in request.args: raise SynapseError(400, "Guest users must specify room_id param") - if "room_id" in request.args: - room_id = request.args["room_id"][0] + if b"room_id" in request.args: + room_id = request.args[b"room_id"][0].decode('ascii') pagin_config = PaginationConfig.from_request(request) timeout = EventStreamRestServlet.DEFAULT_LONGPOLL_TIME_MS - if "timeout" in request.args: + if b"timeout" in request.args: try: - timeout = int(request.args["timeout"][0]) + timeout = int(request.args[b"timeout"][0]) except ValueError: raise SynapseError(400, "timeout must be in milliseconds.") - as_client_event = "raw" not in request.args + as_client_event = b"raw" not in request.args chunk = yield self.event_stream_handler.get_stream( requester.user.to_string(), diff --git a/synapse/rest/client/v1/initial_sync.py b/synapse/rest/client/v1/initial_sync.py index fd5f85b53e..3ead75cb77 100644 --- a/synapse/rest/client/v1/initial_sync.py +++ b/synapse/rest/client/v1/initial_sync.py @@ -32,7 +32,7 @@ class InitialSyncRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_GET(self, request): requester = yield self.auth.get_user_by_req(request) - as_client_event = "raw" not in request.args + as_client_event = b"raw" not in request.args pagination_config = PaginationConfig.from_request(request) include_archived = parse_boolean(request, "archived", default=False) content = yield self.initial_sync_handler.snapshot_all_rooms( diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index cb85fa1436..0010699d31 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -14,10 +14,9 @@ # limitations under the License. 
import logging -import urllib import xml.etree.ElementTree as ET -from six.moves.urllib import parse as urlparse +from six.moves import urllib from canonicaljson import json from saml2 import BINDING_HTTP_POST, config @@ -134,7 +133,7 @@ class LoginRestServlet(ClientV1RestServlet): LoginRestServlet.SAML2_TYPE): relay_state = "" if "relay_state" in login_submission: - relay_state = "&RelayState=" + urllib.quote( + relay_state = "&RelayState=" + urllib.parse.quote( login_submission["relay_state"]) result = { "uri": "%s%s" % (self.idp_redirect_url, relay_state) @@ -366,7 +365,7 @@ class SAML2RestServlet(ClientV1RestServlet): (user_id, token) = yield handler.register_saml2(username) # Forward to the RelayState callback along with ava if 'RelayState' in request.args: - request.redirect(urllib.unquote( + request.redirect(urllib.parse.unquote( request.args['RelayState'][0]) + '?status=authenticated&access_token=' + token + '&user_id=' + user_id + '&ava=' + @@ -377,7 +376,7 @@ class SAML2RestServlet(ClientV1RestServlet): "user_id": user_id, "token": token, "ava": saml2_auth.ava})) elif 'RelayState' in request.args: - request.redirect(urllib.unquote( + request.redirect(urllib.parse.unquote( request.args['RelayState'][0]) + '?status=not_authenticated') finish_request(request) @@ -390,21 +389,22 @@ class CasRedirectServlet(ClientV1RestServlet): def __init__(self, hs): super(CasRedirectServlet, self).__init__(hs) - self.cas_server_url = hs.config.cas_server_url - self.cas_service_url = hs.config.cas_service_url + self.cas_server_url = hs.config.cas_server_url.encode('ascii') + self.cas_service_url = hs.config.cas_service_url.encode('ascii') def on_GET(self, request): args = request.args - if "redirectUrl" not in args: + if b"redirectUrl" not in args: return (400, "Redirect URL not specified for CAS auth") - client_redirect_url_param = urllib.urlencode({ - "redirectUrl": args["redirectUrl"][0] - }) - hs_redirect_url = self.cas_service_url + "/_matrix/client/api/v1/login/cas/ticket" - service_param = urllib.urlencode({ - "service": "%s?%s" % (hs_redirect_url, client_redirect_url_param) - }) - request.redirect("%s/login?%s" % (self.cas_server_url, service_param)) + client_redirect_url_param = urllib.parse.urlencode({ + b"redirectUrl": args[b"redirectUrl"][0] + }).encode('ascii') + hs_redirect_url = (self.cas_service_url + + b"/_matrix/client/api/v1/login/cas/ticket") + service_param = urllib.parse.urlencode({ + b"service": b"%s?%s" % (hs_redirect_url, client_redirect_url_param) + }).encode('ascii') + request.redirect(b"%s/login?%s" % (self.cas_server_url, service_param)) finish_request(request) @@ -422,11 +422,11 @@ class CasTicketServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_GET(self, request): - client_redirect_url = request.args["redirectUrl"][0] + client_redirect_url = request.args[b"redirectUrl"][0] http_client = self.hs.get_simple_http_client() uri = self.cas_server_url + "/proxyValidate" args = { - "ticket": request.args["ticket"], + "ticket": request.args[b"ticket"][0].decode('ascii'), "service": self.cas_service_url } try: @@ -471,11 +471,11 @@ class CasTicketServlet(ClientV1RestServlet): finish_request(request) def add_login_token_to_redirect_url(self, url, token): - url_parts = list(urlparse.urlparse(url)) - query = dict(urlparse.parse_qsl(url_parts[4])) + url_parts = list(urllib.parse.urlparse(url)) + query = dict(urllib.parse.parse_qsl(url_parts[4])) query.update({"loginToken": token}) - url_parts[4] = urllib.urlencode(query) - return urlparse.urlunparse(url_parts) + 
url_parts[4] = urllib.parse.urlencode(query).encode('ascii') + return urllib.parse.urlunparse(url_parts) def parse_cas_response(self, cas_response_body): user = None diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index 6e95d9bec2..9382b1f124 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -46,7 +46,7 @@ class PushRuleRestServlet(ClientV1RestServlet): try: priority_class = _priority_class_from_spec(spec) except InvalidRuleException as e: - raise SynapseError(400, e.message) + raise SynapseError(400, str(e)) requester = yield self.auth.get_user_by_req(request) @@ -73,7 +73,7 @@ class PushRuleRestServlet(ClientV1RestServlet): content, ) except InvalidRuleException as e: - raise SynapseError(400, e.message) + raise SynapseError(400, str(e)) before = parse_string(request, "before") if before: @@ -95,9 +95,9 @@ class PushRuleRestServlet(ClientV1RestServlet): ) self.notify_user(user_id) except InconsistentRuleException as e: - raise SynapseError(400, e.message) + raise SynapseError(400, str(e)) except RuleNotFoundException as e: - raise SynapseError(400, e.message) + raise SynapseError(400, str(e)) defer.returnValue((200, {})) @@ -142,10 +142,10 @@ class PushRuleRestServlet(ClientV1RestServlet): PushRuleRestServlet.SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR ) - if path[0] == '': + if path[0] == b'': defer.returnValue((200, rules)) - elif path[0] == 'global': - path = path[1:] + elif path[0] == b'global': + path = [x.decode('ascii') for x in path[1:]] result = _filter_ruleset_with_path(rules['global'], path) defer.returnValue((200, result)) else: @@ -192,10 +192,10 @@ class PushRuleRestServlet(ClientV1RestServlet): def _rule_spec_from_path(path): if len(path) < 2: raise UnrecognizedRequestError() - if path[0] != 'pushrules': + if path[0] != b'pushrules': raise UnrecognizedRequestError() - scope = path[1] + scope = path[1].decode('ascii') path = path[2:] if scope != 'global': raise UnrecognizedRequestError() @@ -203,13 +203,13 @@ def _rule_spec_from_path(path): if len(path) == 0: raise UnrecognizedRequestError() - template = path[0] + template = path[0].decode('ascii') path = path[1:] if len(path) == 0 or len(path[0]) == 0: raise UnrecognizedRequestError() - rule_id = path[0] + rule_id = path[0].decode('ascii') spec = { 'scope': scope, @@ -220,7 +220,7 @@ def _rule_spec_from_path(path): path = path[1:] if len(path) > 0 and len(path[0]) > 0: - spec['attr'] = path[0] + spec['attr'] = path[0].decode('ascii') return spec diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py index 182a68b1e2..b84f0260f2 100644 --- a/synapse/rest/client/v1/pusher.py +++ b/synapse/rest/client/v1/pusher.py @@ -59,7 +59,7 @@ class PushersRestServlet(ClientV1RestServlet): ] for p in pushers: - for k, v in p.items(): + for k, v in list(p.items()): if k not in allowed_keys: del p[k] @@ -126,7 +126,7 @@ class PushersSetRestServlet(ClientV1RestServlet): profile_tag=content.get('profile_tag', ""), ) except PusherConfigException as pce: - raise SynapseError(400, "Config Error: " + pce.message, + raise SynapseError(400, "Config Error: " + str(pce), errcode=Codes.MISSING_PARAM) self.notifier.on_new_replication_data() diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 976d98387d..663934efd0 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -207,7 +207,7 @@ class RoomSendEventRestServlet(ClientV1RestServlet): "sender": requester.user.to_string(), } - if 'ts' in 
request.args and requester.app_service: + if b'ts' in request.args and requester.app_service: event_dict['origin_server_ts'] = parse_integer(request, "ts", 0) event = yield self.event_creation_hander.create_and_send_nonmember_event( @@ -255,7 +255,9 @@ class JoinRoomAliasServlet(ClientV1RestServlet): if RoomID.is_valid(room_identifier): room_id = room_identifier try: - remote_room_hosts = request.args["server_name"] + remote_room_hosts = [ + x.decode('ascii') for x in request.args[b"server_name"] + ] except Exception: remote_room_hosts = None elif RoomAlias.is_valid(room_identifier): @@ -461,10 +463,10 @@ class RoomMessageListRestServlet(ClientV1RestServlet): pagination_config = PaginationConfig.from_request( request, default_limit=10, ) - as_client_event = "raw" not in request.args - filter_bytes = parse_string(request, "filter") + as_client_event = b"raw" not in request.args + filter_bytes = parse_string(request, b"filter", encoding=None) if filter_bytes: - filter_json = urlparse.unquote(filter_bytes).decode("UTF-8") + filter_json = urlparse.unquote(filter_bytes.decode("UTF-8")) event_filter = Filter(json.loads(filter_json)) else: event_filter = None @@ -560,7 +562,7 @@ class RoomEventContextServlet(ClientV1RestServlet): # picking the API shape for symmetry with /messages filter_bytes = parse_string(request, "filter") if filter_bytes: - filter_json = urlparse.unquote(filter_bytes).decode("UTF-8") + filter_json = urlparse.unquote(filter_bytes) event_filter = Filter(json.loads(filter_json)) else: event_filter = None diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index 263d8eb73e..0251146722 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -89,7 +89,7 @@ class SyncRestServlet(RestServlet): @defer.inlineCallbacks def on_GET(self, request): - if "from" in request.args: + if b"from" in request.args: # /events used to use 'from', but /sync uses 'since'. # Lets be helpful and whine if we see a 'from'. 
raise SynapseError( diff --git a/synapse/rest/client/v2_alpha/thirdparty.py b/synapse/rest/client/v2_alpha/thirdparty.py index d9d379182e..b9b5d07677 100644 --- a/synapse/rest/client/v2_alpha/thirdparty.py +++ b/synapse/rest/client/v2_alpha/thirdparty.py @@ -79,7 +79,7 @@ class ThirdPartyUserServlet(RestServlet): yield self.auth.get_user_by_req(request, allow_guest=True) fields = request.args - fields.pop("access_token", None) + fields.pop(b"access_token", None) results = yield self.appservice_handler.query_3pe( ThirdPartyEntityKind.USER, protocol, fields @@ -102,7 +102,7 @@ class ThirdPartyLocationServlet(RestServlet): yield self.auth.get_user_by_req(request, allow_guest=True) fields = request.args - fields.pop("access_token", None) + fields.pop(b"access_token", None) results = yield self.appservice_handler.query_3pe( ThirdPartyEntityKind.LOCATION, protocol, fields diff --git a/synapse/rest/key/v1/server_key_resource.py b/synapse/rest/key/v1/server_key_resource.py index b9ee6e1c13..38eb2ee23f 100644 --- a/synapse/rest/key/v1/server_key_resource.py +++ b/synapse/rest/key/v1/server_key_resource.py @@ -88,5 +88,5 @@ class LocalKey(Resource): ) def getChild(self, name, request): - if name == '': + if name == b'': return self diff --git a/synapse/rest/key/v2/__init__.py b/synapse/rest/key/v2/__init__.py index 3491fd2118..cb5abcf826 100644 --- a/synapse/rest/key/v2/__init__.py +++ b/synapse/rest/key/v2/__init__.py @@ -22,5 +22,5 @@ from .remote_key_resource import RemoteKey class KeyApiV2Resource(Resource): def __init__(self, hs): Resource.__init__(self) - self.putChild("server", LocalKey(hs)) - self.putChild("query", RemoteKey(hs)) + self.putChild(b"server", LocalKey(hs)) + self.putChild(b"query", RemoteKey(hs)) diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index 7d67e4b064..eb8782aa6e 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -103,7 +103,7 @@ class RemoteKey(Resource): def async_render_GET(self, request): if len(request.postpath) == 1: server, = request.postpath - query = {server: {}} + query = {server.decode('ascii'): {}} elif len(request.postpath) == 2: server, key_id = request.postpath minimum_valid_until_ts = parse_integer( @@ -112,11 +112,12 @@ class RemoteKey(Resource): arguments = {} if minimum_valid_until_ts is not None: arguments["minimum_valid_until_ts"] = minimum_valid_until_ts - query = {server: {key_id: arguments}} + query = {server.decode('ascii'): {key_id.decode('ascii'): arguments}} else: raise SynapseError( 404, "Not found %r" % request.postpath, Codes.NOT_FOUND ) + yield self.query_keys(request, query, query_remote_on_cache_miss=True) def render_POST(self, request): @@ -135,6 +136,7 @@ class RemoteKey(Resource): @defer.inlineCallbacks def query_keys(self, request, query, query_remote_on_cache_miss=False): logger.info("Handling query for keys %r", query) + store_queries = [] for server_name, key_ids in query.items(): if ( diff --git a/synapse/rest/media/v0/content_repository.py b/synapse/rest/media/v0/content_repository.py index f255f2883f..5a426ff2f6 100644 --- a/synapse/rest/media/v0/content_repository.py +++ b/synapse/rest/media/v0/content_repository.py @@ -56,7 +56,7 @@ class ContentRepoResource(resource.Resource): # servers. # TODO: A little crude here, we could do this better. 
- filename = request.path.split('/')[-1] + filename = request.path.decode('ascii').split('/')[-1] # be paranoid filename = re.sub("[^0-9A-z.-_]", "", filename) @@ -78,7 +78,7 @@ class ContentRepoResource(resource.Resource): # select private. don't bother setting Expires as all our matrix # clients are smart enough to be happy with Cache-Control (right?) request.setHeader( - "Cache-Control", "public,max-age=86400,s-maxage=86400" + b"Cache-Control", b"public,max-age=86400,s-maxage=86400" ) d = FileSender().beginFileTransfer(f, request) diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py index 65f4bd2910..76e479afa3 100644 --- a/synapse/rest/media/v1/_base.py +++ b/synapse/rest/media/v1/_base.py @@ -15,9 +15,8 @@ import logging import os -import urllib -from six.moves.urllib import parse as urlparse +from six.moves import urllib from twisted.internet import defer from twisted.protocols.basic import FileSender @@ -35,10 +34,15 @@ def parse_media_id(request): # This allows users to append e.g. /test.png to the URL. Useful for # clients that parse the URL to see content type. server_name, media_id = request.postpath[:2] + + if isinstance(server_name, bytes): + server_name = server_name.decode('utf-8') + media_id = media_id.decode('utf8') + file_name = None if len(request.postpath) > 2: try: - file_name = urlparse.unquote(request.postpath[-1]).decode("utf-8") + file_name = urllib.parse.unquote(request.postpath[-1].decode("utf-8")) except UnicodeDecodeError: pass return server_name, media_id, file_name @@ -93,22 +97,18 @@ def add_file_headers(request, media_type, file_size, upload_name): file_size (int): Size in bytes of the media, if known. upload_name (str): The name of the requested file, if any. """ + def _quote(x): + return urllib.parse.quote(x.encode("utf-8")) + request.setHeader(b"Content-Type", media_type.encode("UTF-8")) if upload_name: if is_ascii(upload_name): - request.setHeader( - b"Content-Disposition", - b"inline; filename=%s" % ( - urllib.quote(upload_name.encode("utf-8")), - ), - ) + disposition = ("inline; filename=%s" % (_quote(upload_name),)).encode("ascii") else: - request.setHeader( - b"Content-Disposition", - b"inline; filename*=utf-8''%s" % ( - urllib.quote(upload_name.encode("utf-8")), - ), - ) + disposition = ( + "inline; filename*=utf-8''%s" % (_quote(upload_name),)).encode("ascii") + + request.setHeader(b"Content-Disposition", disposition) # cache for at least a day. 
# XXX: we might want to turn this off for data we don't want to diff --git a/synapse/rest/media/v1/download_resource.py b/synapse/rest/media/v1/download_resource.py index fbfa85f74f..ca90964d1d 100644 --- a/synapse/rest/media/v1/download_resource.py +++ b/synapse/rest/media/v1/download_resource.py @@ -47,12 +47,12 @@ class DownloadResource(Resource): def _async_render_GET(self, request): set_cors_headers(request) request.setHeader( - "Content-Security-Policy", - "default-src 'none';" - " script-src 'none';" - " plugin-types application/pdf;" - " style-src 'unsafe-inline';" - " object-src 'self';" + b"Content-Security-Policy", + b"default-src 'none';" + b" script-src 'none';" + b" plugin-types application/pdf;" + b" style-src 'unsafe-inline';" + b" object-src 'self';" ) server_name, media_id, name = parse_media_id(request) if server_name == self.server_name: diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 241c972070..a828ff4438 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -20,7 +20,7 @@ import logging import os import shutil -from six import iteritems +from six import PY3, iteritems from six.moves.urllib import parse as urlparse import twisted.internet.error @@ -397,13 +397,13 @@ class MediaRepository(object): yield finish() - media_type = headers["Content-Type"][0] + media_type = headers[b"Content-Type"][0].decode('ascii') time_now_ms = self.clock.time_msec() - content_disposition = headers.get("Content-Disposition", None) + content_disposition = headers.get(b"Content-Disposition", None) if content_disposition: - _, params = cgi.parse_header(content_disposition[0],) + _, params = cgi.parse_header(content_disposition[0].decode('ascii'),) upload_name = None # First check if there is a valid UTF-8 filename @@ -419,9 +419,13 @@ class MediaRepository(object): upload_name = upload_name_ascii if upload_name: - upload_name = urlparse.unquote(upload_name) + if PY3: + upload_name = urlparse.unquote(upload_name) + else: + upload_name = urlparse.unquote(upload_name.encode('ascii')) try: - upload_name = upload_name.decode("utf-8") + if isinstance(upload_name, bytes): + upload_name = upload_name.decode("utf-8") except UnicodeDecodeError: upload_name = None else: @@ -755,14 +759,15 @@ class MediaRepositoryResource(Resource): Resource.__init__(self) media_repo = hs.get_media_repository() - self.putChild("upload", UploadResource(hs, media_repo)) - self.putChild("download", DownloadResource(hs, media_repo)) - self.putChild("thumbnail", ThumbnailResource( + + self.putChild(b"upload", UploadResource(hs, media_repo)) + self.putChild(b"download", DownloadResource(hs, media_repo)) + self.putChild(b"thumbnail", ThumbnailResource( hs, media_repo, media_repo.media_storage, )) - self.putChild("identicon", IdenticonResource()) + self.putChild(b"identicon", IdenticonResource()) if hs.config.url_preview_enabled: - self.putChild("preview_url", PreviewUrlResource( + self.putChild(b"preview_url", PreviewUrlResource( hs, media_repo, media_repo.media_storage, )) - self.putChild("config", MediaConfigResource(hs)) + self.putChild(b"config", MediaConfigResource(hs)) diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 778ef97337..cad2dec33a 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -261,7 +261,7 @@ class PreviewUrlResource(Resource): logger.debug("Calculated OG for %s as %s" % 
(url, og)) - jsonog = json.dumps(og) + jsonog = json.dumps(og).encode('utf8') # store OG in history-aware DB cache yield self.store.store_url_cache( @@ -301,20 +301,20 @@ class PreviewUrlResource(Resource): logger.warn("Error downloading %s: %r", url, e) raise SynapseError( 500, "Failed to download content: %s" % ( - traceback.format_exception_only(sys.exc_type, e), + traceback.format_exception_only(sys.exc_info()[0], e), ), Codes.UNKNOWN, ) yield finish() try: - if "Content-Type" in headers: - media_type = headers["Content-Type"][0] + if b"Content-Type" in headers: + media_type = headers[b"Content-Type"][0].decode('ascii') else: media_type = "application/octet-stream" time_now_ms = self.clock.time_msec() - content_disposition = headers.get("Content-Disposition", None) + content_disposition = headers.get(b"Content-Disposition", None) if content_disposition: _, params = cgi.parse_header(content_disposition[0],) download_name = None From 0ddf48672421aa64920342b959dc77432c869889 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 12 Sep 2018 11:58:52 +0100 Subject: [PATCH 079/138] expose number of real reserved users --- synapse/app/homeserver.py | 10 ++++--- synapse/storage/monthly_active_users.py | 17 ++++++++++++ tests/storage/test_monthly_active_users.py | 31 ++++++++++++++++++++++ 3 files changed, 55 insertions(+), 3 deletions(-) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 3eb5b663de..e6372bcec2 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -307,6 +307,7 @@ class SynapseHomeServer(HomeServer): # Gauges to expose monthly active user control metrics current_mau_gauge = Gauge("synapse_admin_mau:current", "Current MAU") max_mau_gauge = Gauge("synapse_admin_mau:max", "MAU Limit") +reserved_mau_gauge = Gauge("synapse_admin_mau:reserved", "Reserved real MAU users") def setup(config_options): @@ -531,10 +532,13 @@ def run(hs): @defer.inlineCallbacks def generate_monthly_active_users(): - count = 0 + current_mau_count = 0 + reserved_mau_count = 0 if hs.config.limit_usage_by_mau: - count = yield hs.get_datastore().get_monthly_active_count() - current_mau_gauge.set(float(count)) + current_mau_count = yield hs.get_datastore().get_monthly_active_count() + reserved_mau_count = yield hs.get_datastore().get_reserved_real_user_account() + current_mau_gauge.set(float(current_mau_count)) + reserved_mau_gauge.set(float(reserved_mau_count)) max_mau_gauge.set(float(hs.config.max_mau_value)) hs.get_datastore().initialise_reserved_users( diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py index b890c152db..53d125d305 100644 --- a/synapse/storage/monthly_active_users.py +++ b/synapse/storage/monthly_active_users.py @@ -146,6 +146,23 @@ class MonthlyActiveUsersStore(SQLBaseStore): return count return self.runInteraction("count_users", _count_users) + @defer.inlineCallbacks + def get_reserved_real_user_account(self): + """Of the reserved threepids defined in config, how many are associated + with registered users? 
+
+        Returns:
+            Deferred[int]: Number of real reserved users
+        """
+        count = 0
+        for tp in self.hs.config.mau_limits_reserved_threepids:
+            user_id = yield self.hs.get_datastore().get_user_id_by_threepid(
+                tp["medium"], tp["address"]
+            )
+            if user_id:
+                count = count + 1
+        defer.returnValue(count)
+
     @defer.inlineCallbacks
     def upsert_monthly_active_user(self, user_id):
         """
diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py
index ccfc21b7fc..662f2ed845 100644
--- a/tests/storage/test_monthly_active_users.py
+++ b/tests/storage/test_monthly_active_users.py
@@ -183,3 +183,34 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase):
         self.store.populate_monthly_active_users('user_id')
         self.pump()
         self.store.upsert_monthly_active_user.assert_not_called()
+
+    def test_get_reserved_real_user_account(self):
+        # Test no reserved users, or reserved threepids
+        count = self.store.get_reserved_real_user_account()
+        self.assertEquals(self.get_success(count), 0)
+        # Test reserved users but no registered users
+
+        user1 = '@user1:example.com'
+        user2 = '@user2:example.com'
+        user1_email = 'user1@example.com'
+        user2_email = 'user2@example.com'
+        threepids = [
+            {'medium': 'email', 'address': user1_email},
+            {'medium': 'email', 'address': user2_email},
+        ]
+        self.hs.config.mau_limits_reserved_threepids = threepids
+        self.store.initialise_reserved_users(threepids)
+        self.pump()
+        count = self.store.get_reserved_real_user_account()
+        self.assertEquals(self.get_success(count), 0)
+
+        # Test reserved registered users
+        self.store.register(user_id=user1, token="123", password_hash=None)
+        self.store.register(user_id=user2, token="456", password_hash=None)
+        self.pump()
+
+        now = int(self.hs.get_clock().time_msec())
+        self.store.user_add_threepid(user1, "email", user1_email, now, now)
+        self.store.user_add_threepid(user2, "email", user2_email, now, now)
+        count = self.store.get_reserved_real_user_account()
+        self.assertEquals(self.get_success(count), len(threepids))

From 5cea4e16c7c6e7b38ff294cb77b4ef8f51d8b76e Mon Sep 17 00:00:00 2001
From: Neil Johnson
Date: Wed, 12 Sep 2018 12:03:31 +0100
Subject: [PATCH 080/138] towncrier

---
 changelog.d/3846.feature | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/3846.feature

diff --git a/changelog.d/3846.feature b/changelog.d/3846.feature
new file mode 100644
index 0000000000..dc9e9d86cc
--- /dev/null
+++ b/changelog.d/3846.feature
@@ -0,0 +1 @@
+create synapse_admin_mau:reserved metric to expose number of real reserved users

From 7ca097f77eba19403b4169e23404678aa2c7cd91 Mon Sep 17 00:00:00 2001
From: Amber Brown
Date: Wed, 12 Sep 2018 23:23:32 +1000
Subject: [PATCH 081/138] Port federation/ to py3 (#3847)

---
 changelog.d/3847.misc                   |  1 +
 synapse/federation/federation_client.py |  8 ++++----
 synapse/federation/transport/client.py  |  5 +++--
 synapse/federation/transport/server.py  | 24 +++++++++++-------------
 4 files changed, 19 insertions(+), 19 deletions(-)
 create mode 100644 changelog.d/3847.misc

diff --git a/changelog.d/3847.misc b/changelog.d/3847.misc
new file mode 100644
index 0000000000..bf8b5afea4
--- /dev/null
+++ b/changelog.d/3847.misc
@@ -0,0 +1 @@
+federation/ is now ported to Python 3.
\ No newline at end of file diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index c9f3c2d352..fe67b2ff42 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -271,10 +271,10 @@ class FederationClient(FederationBase): event_id, destination, e, ) except NotRetryingDestination as e: - logger.info(e.message) + logger.info(str(e)) continue except FederationDeniedError as e: - logger.info(e.message) + logger.info(str(e)) continue except Exception as e: pdu_attempts[destination] = now @@ -510,7 +510,7 @@ class FederationClient(FederationBase): else: logger.warn( "Failed to %s via %s: %i %s", - description, destination, e.code, e.message, + description, destination, e.code, e.args[0], ) except Exception: logger.warn( @@ -875,7 +875,7 @@ class FederationClient(FederationBase): except Exception as e: logger.exception( "Failed to send_third_party_invite via %s: %s", - destination, e.message + destination, str(e) ) raise RuntimeError("Failed to send to any server.") diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 1054441ca5..2ab973d6c8 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -15,7 +15,8 @@ # limitations under the License. import logging -import urllib + +from six.moves import urllib from twisted.internet import defer @@ -951,4 +952,4 @@ def _create_path(prefix, path, *args): Returns: str """ - return prefix + path % tuple(urllib.quote(arg, "") for arg in args) + return prefix + path % tuple(urllib.parse.quote(arg, "") for arg in args) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 3972922ff9..2f874b4838 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -90,8 +90,8 @@ class Authenticator(object): @defer.inlineCallbacks def authenticate_request(self, request, content): json_request = { - "method": request.method, - "uri": request.uri, + "method": request.method.decode('ascii'), + "uri": request.uri.decode('ascii'), "destination": self.server_name, "signatures": {}, } @@ -252,7 +252,7 @@ class BaseFederationServlet(object): by the callback method. None if the request has already been handled. """ content = None - if request.method in ["PUT", "POST"]: + if request.method in [b"PUT", b"POST"]: # TODO: Handle other method types? other content types? 
content = parse_json_object_from_request(request) @@ -386,7 +386,7 @@ class FederationStateServlet(BaseFederationServlet): return self.handler.on_context_state_request( origin, context, - query.get("event_id", [None])[0], + parse_string_from_args(query, "event_id", None), ) @@ -397,7 +397,7 @@ class FederationStateIdsServlet(BaseFederationServlet): return self.handler.on_state_ids_request( origin, room_id, - query.get("event_id", [None])[0], + parse_string_from_args(query, "event_id", None), ) @@ -405,14 +405,12 @@ class FederationBackfillServlet(BaseFederationServlet): PATH = "/backfill/(?P[^/]*)/" def on_GET(self, origin, content, query, context): - versions = query["v"] - limits = query["limit"] + versions = [x.decode('ascii') for x in query[b"v"]] + limit = parse_integer_from_args(query, "limit", None) - if not limits: + if not limit: return defer.succeed((400, {"error": "Did not include limit param"})) - limit = int(limits[-1]) - return self.handler.on_backfill_request(origin, context, versions, limit) @@ -423,7 +421,7 @@ class FederationQueryServlet(BaseFederationServlet): def on_GET(self, origin, content, query, query_type): return self.handler.on_query_request( query_type, - {k: v[0].decode("utf-8") for k, v in query.items()} + {k.decode('utf8'): v[0].decode("utf-8") for k, v in query.items()} ) @@ -630,14 +628,14 @@ class OpenIdUserInfo(BaseFederationServlet): @defer.inlineCallbacks def on_GET(self, origin, content, query): - token = query.get("access_token", [None])[0] + token = query.get(b"access_token", [None])[0] if token is None: defer.returnValue((401, { "errcode": "M_MISSING_TOKEN", "error": "Access Token required" })) return - user_id = yield self.handler.on_openid_userinfo(token) + user_id = yield self.handler.on_openid_userinfo(token.decode('ascii')) if user_id is None: defer.returnValue((401, { From 65cd8ccc795657c3ff96b41712a95ff890a0d184 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 12 Sep 2018 23:29:21 +1000 Subject: [PATCH 082/138] Add JUnit summaries to CircleCI as well as merged runs (#3704) --- .circleci/config.yml | 66 ++++++++++++++++++++++++++++++++++ .circleci/merge_base_branch.sh | 27 ++++++++++++++ changelog.d/3704.misc | 1 + 3 files changed, 94 insertions(+) create mode 100755 .circleci/merge_base_branch.sh create mode 100644 changelog.d/3704.misc diff --git a/.circleci/config.yml b/.circleci/config.yml index e03f01b837..5266544f3c 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -9,6 +9,8 @@ jobs: - store_artifacts: path: ~/project/logs destination: logs + - store_test_results: + path: logs sytestpy2postgres: machine: true steps: @@ -18,6 +20,34 @@ jobs: - store_artifacts: path: ~/project/logs destination: logs + - store_test_results: + path: logs + sytestpy2merged: + machine: true + steps: + - checkout + - run: bash .circleci/merge_base_branch.sh + - run: docker pull matrixdotorg/sytest-synapsepy2 + - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy2 + - store_artifacts: + path: ~/project/logs + destination: logs + - store_test_results: + path: logs + + sytestpy2postgresmerged: + machine: true + steps: + - checkout + - run: bash .circleci/merge_base_branch.sh + - run: docker pull matrixdotorg/sytest-synapsepy2 + - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy2 + - store_artifacts: + path: ~/project/logs + destination: logs + - store_test_results: + path: logs + sytestpy3: machine: true steps: @@ -27,6 +57,8 @@ jobs: - 
store_artifacts:
          path: ~/project/logs
          destination: logs
+      - store_test_results:
+          path: logs
   sytestpy3postgres:
     machine: true
     steps:
@@ -36,6 +68,32 @@ jobs:
       - store_artifacts:
           path: ~/project/logs
           destination: logs
+      - store_test_results:
+          path: logs
+  sytestpy3merged:
+    machine: true
+    steps:
+      - checkout
+      - run: bash .circleci/merge_base_branch.sh
+      - run: docker pull matrixdotorg/sytest-synapsepy3
+      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy3
+      - store_artifacts:
+          path: ~/project/logs
+          destination: logs
+      - store_test_results:
+          path: logs
+  sytestpy3postgresmerged:
+    machine: true
+    steps:
+      - checkout
+      - run: bash .circleci/merge_base_branch.sh
+      - run: docker pull matrixdotorg/sytest-synapsepy3
+      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy3
+      - store_artifacts:
+          path: ~/project/logs
+          destination: logs
+      - store_test_results:
+          path: logs
 
 workflows:
   version: 2
@@ -43,6 +101,14 @@
     jobs:
       - sytestpy2
       - sytestpy2postgres
+      - sytestpy2merged:
+          filters:
+            branches:
+              ignore: /develop|master/
+      - sytestpy2postgresmerged:
+          filters:
+            branches:
+              ignore: /develop|master/
       # Currently broken while the Python 3 port is incomplete
       # - sytestpy3
       # - sytestpy3postgres
diff --git a/.circleci/merge_base_branch.sh b/.circleci/merge_base_branch.sh
new file mode 100755
index 0000000000..2d700dbf11
--- /dev/null
+++ b/.circleci/merge_base_branch.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+
+set -e
+
+# CircleCI doesn't give CIRCLE_PR_NUMBER in the environment for non-forked PRs. Wonderful.
+# In this case, we just need to do some ~shell magic~ to strip it out of the PULL_REQUEST URL.
+echo 'export CIRCLE_PR_NUMBER="${CIRCLE_PR_NUMBER:-${CIRCLE_PULL_REQUEST##*/}}"' >> "$BASH_ENV"
+source $BASH_ENV
+
+if [[ -z "${CIRCLE_PR_NUMBER}" ]]
+then
+    echo "Can't figure out what the PR number is!"
+    exit 1
+fi
+
+# Get the reference, using the GitHub API
+GITBASE=`curl -q https://api.github.com/repos/matrix-org/synapse/pulls/${CIRCLE_PR_NUMBER} | jq -r '.base.ref'`
+
+# Show what we are before
+git show -s
+
+# Fetch and merge. If it doesn't work, it will raise due to set -e.
+git fetch -u origin $GITBASE
+git merge --no-edit origin/$GITBASE
+
+# Show what we are after.
+git show -s
\ No newline at end of file
diff --git a/changelog.d/3704.misc b/changelog.d/3704.misc
new file mode 100644
index 0000000000..aaae0fbd63
--- /dev/null
+++ b/changelog.d/3704.misc
@@ -0,0 +1 @@
+CircleCI tests now run on the potential merge of a PR.
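Before the next patch, it is worth noting what its refactoring preserves: the federation client's retry loop backs off exponentially, with a cap and multiplicative jitter. A standalone sketch of that schedule (the constant values here are assumptions for illustration; the real MAX_LONG_RETRIES and MAX_SHORT_RETRIES live in matrixfederationclient.py):

    import random

    MAX_LONG_RETRIES = 10   # assumed value, for illustration
    MAX_SHORT_RETRIES = 3   # assumed value, for illustration

    def retry_delay(retries_left, long_retries):
        # Mirrors the backoff arithmetic in the diff below (sketch only).
        if long_retries:
            # Grows as 4 ** n, capped at 60 seconds.
            delay = min(4 ** (MAX_LONG_RETRIES + 1 - retries_left), 60)
        else:
            # Grows as 0.5 * 2 ** n, capped at 2 seconds.
            delay = min(0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left), 2)
        # Jitter spreads retries out so a struggling destination is not
        # hammered by many callers in lockstep.
        return delay * random.uniform(0.8, 1.4)

The point of the patch itself is narrower: the "Sending request" log line moves inside the retry loop, so every attempt is logged rather than just the first.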
From 8c5b84441bdcc0f321ca9a58e8ec20cb35cdb456 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 12 Sep 2018 16:22:14 +0100 Subject: [PATCH 083/138] Log outbound requests when we retry --- synapse/http/matrixfederationclient.py | 145 ++++++++++++------------- 1 file changed, 72 insertions(+), 73 deletions(-) diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index f9a1fbf95d..cf920bc041 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -177,11 +177,6 @@ class MatrixFederationHttpClient(object): txn_id = "%s-O-%s" % (method, self._next_id) self._next_id = (self._next_id + 1) % (MAXINT - 1) - outbound_logger.info( - "{%s} [%s] Sending request: %s %s", - txn_id, destination, method, url - ) - # XXX: Would be much nicer to retry only at the transaction-layer # (once we have reliable transactions in place) if long_retries: @@ -194,85 +189,89 @@ class MatrixFederationHttpClient(object): ).decode('ascii') log_result = None - try: - while True: - try: - if json_callback: - json = json_callback() + while True: + try: + if json_callback: + json = json_callback() - if json: - data = encode_canonical_json(json) - headers_dict["Content-Type"] = ["application/json"] - self.sign_request( - destination, method, http_url, headers_dict, json - ) - else: - data = None - self.sign_request(destination, method, http_url, headers_dict) - - request_deferred = treq.request( - method, - url, - headers=Headers(headers_dict), - data=data, - agent=self.agent, - ) - add_timeout_to_deferred( - request_deferred, - timeout / 1000. if timeout else 60, - self.hs.get_reactor(), - cancelled_to_request_timed_out_error, - ) - response = yield make_deferred_yieldable( - request_deferred, + if json: + data = encode_canonical_json(json) + headers_dict["Content-Type"] = ["application/json"] + self.sign_request( + destination, method, http_url, headers_dict, json ) + else: + data = None + self.sign_request(destination, method, http_url, headers_dict) - log_result = "%d %s" % (response.code, response.phrase,) - break - except Exception as e: - if not retry_on_dns_fail and isinstance(e, DNSLookupError): - logger.warn( - "DNS Lookup failed to %s with %s", - destination, - e - ) - log_result = "DNS Lookup failed to %s with %s" % ( - destination, e - ) - raise + outbound_logger.info( + "{%s} [%s] Sending request: %s %s", + txn_id, destination, method, url + ) + request_deferred = treq.request( + method, + url, + headers=Headers(headers_dict), + data=data, + agent=self.agent, + ) + add_timeout_to_deferred( + request_deferred, + timeout / 1000. 
if timeout else 60,
+                    self.hs.get_reactor(),
+                    cancelled_to_request_timed_out_error,
+                )
+                response = yield make_deferred_yieldable(
+                    request_deferred,
+                )
+
+                log_result = "%d %s" % (response.code, response.phrase,)
+                break
+            except Exception as e:
+                if not retry_on_dns_fail and isinstance(e, DNSLookupError):
                     logger.warn(
-                        "{%s} Sending request failed to %s: %s %s: %s",
-                        txn_id,
+                        "DNS Lookup failed to %s with %s",
                         destination,
-                        method,
-                        url,
-                        _flatten_response_never_received(e),
+                        e
                     )
+                    log_result = "DNS Lookup failed to %s with %s" % (
+                        destination, e
+                    )
+                    raise
 
-                    log_result = _flatten_response_never_received(e)
+                logger.warn(
+                    "{%s} Sending request failed to %s: %s %s: %s",
+                    txn_id,
+                    destination,
+                    method,
+                    url,
+                    _flatten_response_never_received(e),
+                )
 
-                    if retries_left and not timeout:
-                        if long_retries:
-                            delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left)
-                            delay = min(delay, 60)
-                            delay *= random.uniform(0.8, 1.4)
-                        else:
-                            delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left)
-                            delay = min(delay, 2)
-                            delay *= random.uniform(0.8, 1.4)
+                log_result = _flatten_response_never_received(e)
 
-                        yield self.clock.sleep(delay)
-                        retries_left -= 1
+                if retries_left and not timeout:
+                    if long_retries:
+                        delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left)
+                        delay = min(delay, 60)
+                        delay *= random.uniform(0.8, 1.4)
                     else:
-                        raise
-        finally:
-            outbound_logger.info(
-                "{%s} [%s] Result: %s",
-                txn_id,
-                destination,
-                log_result,
-            )
+                        delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left)
+                        delay = min(delay, 2)
+                        delay *= random.uniform(0.8, 1.4)
+
+                    yield self.clock.sleep(delay)
+                    retries_left -= 1
+                else:
+                    raise
+            finally:
+                outbound_logger.info(
+                    "{%s} [%s] Result: %s",
+                    txn_id,
+                    destination,
+                    log_result,
+                )
 
         if 200 <= response.code < 300:
             pass

From 8decd6233dabd87160794949ffd95282e60ab01c Mon Sep 17 00:00:00 2001
From: Neil Johnson
Date: Wed, 12 Sep 2018 16:22:15 +0100
Subject: [PATCH 084/138] improve naming

---
 changelog.d/3846.feature                   |  2 +-
 synapse/app/homeserver.py                  | 14 +++++++++-----
 synapse/storage/monthly_active_users.py    |  2 +-
 tests/storage/test_monthly_active_users.py |  6 +++---
 4 files changed, 14 insertions(+), 10 deletions(-)

diff --git a/changelog.d/3846.feature b/changelog.d/3846.feature
index dc9e9d86cc..453c11d3f8 100644
--- a/changelog.d/3846.feature
+++ b/changelog.d/3846.feature
@@ -1 +1 @@
-create synapse_admin_mau:reserved metric to expose number of real reserved users
+Add synapse_admin_mau:registered_reserved_users metric to expose number of real reserved users
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index e6372bcec2..ac97e19649 100755
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -307,7 +307,10 @@ class SynapseHomeServer(HomeServer):
 # Gauges to expose monthly active user control metrics
 current_mau_gauge = Gauge("synapse_admin_mau:current", "Current MAU")
 max_mau_gauge = Gauge("synapse_admin_mau:max", "MAU Limit")
-reserved_mau_gauge = Gauge("synapse_admin_mau:reserved", "Reserved real MAU users")
+registered_reserved_users_mau_gauge = Gauge(
+    "synapse_admin_mau:registered_reserved_users",
+    "Registered users with reserved threepids"
+)
 
 
 def setup(config_options):
@@ -533,12 +536,13 @@ def run(hs):
     @defer.inlineCallbacks
     def generate_monthly_active_users():
         current_mau_count = 0
-        reserved_mau_count = 0
+        reserved_count = 0
+        store = hs.get_datastore()
         if hs.config.limit_usage_by_mau:
-            current_mau_count = yield hs.get_datastore().get_monthly_active_count()
-            reserved_mau_count = yield
hs.get_datastore().get_reserved_real_user_account()
+            current_mau_count = yield store.get_monthly_active_count()
+            reserved_count = yield store.get_registered_reserved_users_count()
         current_mau_gauge.set(float(current_mau_count))
-        reserved_mau_gauge.set(float(reserved_mau_count))
+        registered_reserved_users_mau_gauge.set(float(reserved_count))
         max_mau_gauge.set(float(hs.config.max_mau_value))
 
     hs.get_datastore().initialise_reserved_users(
diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py
index 53d125d305..59580949f1 100644
--- a/synapse/storage/monthly_active_users.py
+++ b/synapse/storage/monthly_active_users.py
@@ -147,7 +147,7 @@ class MonthlyActiveUsersStore(SQLBaseStore):
         return self.runInteraction("count_users", _count_users)
 
     @defer.inlineCallbacks
-    def get_reserved_real_user_account(self):
+    def get_registered_reserved_users_count(self):
         """Of the reserved threepids defined in config, how many are associated
         with registered users?
 
diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py
index 662f2ed845..686f12a0dc 100644
--- a/tests/storage/test_monthly_active_users.py
+++ b/tests/storage/test_monthly_active_users.py
@@ -186,7 +186,7 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase):
 
     def test_get_reserved_real_user_account(self):
         # Test no reserved users, or reserved threepids
-        count = self.store.get_reserved_real_user_account()
+        count = self.store.get_registered_reserved_users_count()
         self.assertEquals(self.get_success(count), 0)
         # Test reserved users but no registered users
 
@@ -201,7 +201,7 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase):
         self.hs.config.mau_limits_reserved_threepids = threepids
         self.store.initialise_reserved_users(threepids)
         self.pump()
-        count = self.store.get_reserved_real_user_account()
+        count = self.store.get_registered_reserved_users_count()
         self.assertEquals(self.get_success(count), 0)
 
         # Test reserved registered users
@@ -212,5 +212,5 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase):
         now = int(self.hs.get_clock().time_msec())
         self.store.user_add_threepid(user1, "email", user1_email, now, now)
         self.store.user_add_threepid(user2, "email", user2_email, now, now)
-        count = self.store.get_reserved_real_user_account()
+        count = self.store.get_registered_reserved_users_count()
         self.assertEquals(self.get_success(count), len(threepids))

From 3db016b641bf9141c56d10376c7dcbc8108a6a9e Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 12 Sep 2018 16:25:18 +0100
Subject: [PATCH 085/138] Newsfile

---
 changelog.d/3853.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/3853.misc

diff --git a/changelog.d/3853.misc b/changelog.d/3853.misc
new file mode 100644
index 0000000000..db45d4983d
--- /dev/null
+++ b/changelog.d/3853.misc
@@ -0,0 +1 @@
+Log when we retry outbound requests

From 11bfc2af1cfdff08775160ea0c91c51326f1f665 Mon Sep 17 00:00:00 2001
From: Matthew Hodgson
Date: Wed, 12 Sep 2018 16:45:42 +0100
Subject: [PATCH 086/138] fix logline

---
 synapse/handlers/sync.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 23983a51ab..34729e7f8c 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -1701,7 +1701,7 @@ class SyncHandler(object):
         if batch.limited:
             user_id = sync_result_builder.sync_config.user.to_string()
             logger.info(
-                "Incremental syncing room %s for user %s with %d state events" % (
+                "Incremental gappy sync of room %s for user %s
with %d state events" % ( room_id, user_id, len(state), From 0e200e366d568586e8f7f9dd0beb9b53bef86047 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 12 Sep 2018 16:47:20 +0100 Subject: [PATCH 087/138] correctly log gappy sync metrics --- synapse/handlers/sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 34729e7f8c..88ef894d6e 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1698,7 +1698,7 @@ class SyncHandler(object): sync_result_builder.joined.append(room_sync) - if batch.limited: + if batch.limited and since_token: user_id = sync_result_builder.sync_config.user.to_string() logger.info( "Incremental gappy sync of room %s for user %s with %d state events" % ( From 0403cf07839abc0129ed75e785b10330338caf76 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 12 Sep 2018 16:54:28 +0100 Subject: [PATCH 088/138] argh pep8 --- synapse/handlers/sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 88ef894d6e..45a1dd5952 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1701,7 +1701,7 @@ class SyncHandler(object): if batch.limited and since_token: user_id = sync_result_builder.sync_config.user.to_string() logger.info( - "Incremental gappy sync of room %s for user %s with %d state events" % ( + "Incremental gappy sync of %s for user %s with %d state events" % ( room_id, user_id, len(state), From 2ac1abbc7e8647548ae2e32119cb2bd3b173d9bd Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 12 Sep 2018 17:11:05 +0100 Subject: [PATCH 089/138] show heroes if a room has a 'deleted' name/canonical_alias (#3851) --- changelog.d/3851.bugfix | 1 + synapse/handlers/sync.py | 17 +++++++++++++++-- 2 files changed, 16 insertions(+), 2 deletions(-) create mode 100644 changelog.d/3851.bugfix diff --git a/changelog.d/3851.bugfix b/changelog.d/3851.bugfix new file mode 100644 index 0000000000..b53a9efe7b --- /dev/null +++ b/changelog.d/3851.bugfix @@ -0,0 +1 @@ +Show heroes if room name/canonical alias has been deleted diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 45a1dd5952..9bca4e7067 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -562,8 +562,21 @@ class SyncHandler(object): details.get(Membership.INVITE, empty_ms).count ) - if name_id or canonical_alias_id: - defer.returnValue(summary) + # if the room has a name or canonical_alias set, we can skip + # calculating heroes. we assume that if the event has contents, it'll + # be a valid name or canonical_alias - i.e. we're checking that they + # haven't been "deleted" by blatting {} over the top. 
+ if name_id: + name = yield self.store.get_event(name_id, allow_none=False) + if name and name.content: + defer.returnValue(summary) + + if canonical_alias_id: + canonical_alias = yield self.store.get_event( + canonical_alias_id, allow_none=False, + ) + if canonical_alias and canonical_alias.content: + defer.returnValue(summary) joined_user_ids = [ r[0] for r in details.get(Membership.JOIN, empty_ms).members From b7d2fb5eb9c55f95048444bb6703abbf2fe14c5c Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 13 Sep 2018 19:59:32 +1000 Subject: [PATCH 090/138] Remove some superfluous logging (#3855) --- .circleci/merge_base_branch.sh | 2 +- changelog.d/3855.misc | 1 + synapse/__init__.py | 10 ++++++++++ synapse/config/logger.py | 17 ++++++++++++++++- 4 files changed, 28 insertions(+), 2 deletions(-) create mode 100644 changelog.d/3855.misc diff --git a/.circleci/merge_base_branch.sh b/.circleci/merge_base_branch.sh index 2d700dbf11..4e297d77da 100755 --- a/.circleci/merge_base_branch.sh +++ b/.circleci/merge_base_branch.sh @@ -4,7 +4,7 @@ set -e # CircleCI doesn't give CIRCLE_PR_NUMBER in the environment for non-forked PRs. Wonderful. # In this case, we just need to do some ~shell magic~ to strip it out of the PULL_REQUEST URL. -echo 'export CIRCLE_PR_NUMBER="${CIRCLE_PR_NUMBER:-${CIRCLE_PULL_REQUEST##*/}}"' >> "$BASH_ENV" +echo 'export CIRCLE_PR_NUMBER="${CIRCLE_PR_NUMBER:-${CIRCLE_PULL_REQUEST##*/}}"' >> $BASH_ENV source $BASH_ENV if [[ -z "${CIRCLE_PR_NUMBER}" ]] diff --git a/changelog.d/3855.misc b/changelog.d/3855.misc new file mode 100644 index 0000000000..a25bb020ba --- /dev/null +++ b/changelog.d/3855.misc @@ -0,0 +1 @@ +Removed some excess logging messages. \ No newline at end of file diff --git a/synapse/__init__.py b/synapse/__init__.py index 65a2b894cc..9dbe0b9f10 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -17,4 +17,14 @@ """ This is a reference implementation of a Matrix home server. """ +try: + from twisted.internet import protocol + from twisted.internet.protocol import Factory + from twisted.names.dns import DNSDatagramProtocol + protocol.Factory.noisy = False + Factory.noisy = False + DNSDatagramProtocol.noisy = False +except ImportError: + pass + __version__ = "0.33.4" diff --git a/synapse/config/logger.py b/synapse/config/logger.py index 3f187adfc8..e9a936118d 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -227,7 +227,22 @@ def setup_logging(config, use_worker_options=False): # # However this may not be too much of a problem if we are just writing to a file. observer = STDLibLogObserver() + + def _log(event): + + if "log_text" in event: + if event["log_text"].startswith("DNSDatagramProtocol starting on "): + return + + if event["log_text"].startswith("(UDP Port "): + return + + if event["log_text"].startswith("Timing out client"): + return + + return observer(event) + globalLogBeginner.beginLoggingTo( - [observer], + [_log], redirectStandardIO=not config.no_redirect_stdio, ) From c857f5ef9b77b07179555a63973f11b8aa8c1752 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 13 Sep 2018 12:48:10 +0100 Subject: [PATCH 091/138] Make purge history slightly faster Don't pull out events that are outliers and won't be deleted, as nothing should happen to them. 
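Concretely, the query that fills events_to_purge just gains an outlier guard in its WHERE clause. A simplified sketch of the resulting selection (should_delete here stands in for the computed should_delete_expr, so this is illustrative rather than the literal SQL built below):

    # Sketch only: outliers are selected only when they will actually
    # be deleted; everything else in the room below the purge token is
    # still tracked in the working table as before.
    sql = (
        "INSERT INTO events_to_purge"
        " SELECT event_id, should_delete"
        " FROM events AS e LEFT JOIN state_events USING (event_id)"
        " WHERE (NOT outlier OR should_delete)"
        " AND e.room_id = ? AND topological_ordering < ?"
    )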
---
 synapse/storage/events.py | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 30ff87a4c4..1c8e01a47e 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -1930,15 +1930,22 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
         should_delete_params = ()
         if not delete_local_events:
             should_delete_expr += " AND event_id NOT LIKE ?"
-            should_delete_params += ("%:" + self.hs.hostname, )
+            should_delete_params += (
+                "%:" + self.hs.hostname,
+                "%:" + self.hs.hostname,
+            )
 
         should_delete_params += (room_id, token.topological)
 
+        # Note that we don't insert events that are outliers and aren't going
+        # to be deleted, as nothing will happen to them.
         txn.execute(
             "INSERT INTO events_to_purge"
             " SELECT event_id, %s"
             " FROM events AS e LEFT JOIN state_events USING (event_id)"
-            " WHERE e.room_id = ? AND topological_ordering < ?" % (
+            " WHERE (NOT outlier OR (%s)) AND e.room_id = ? AND topological_ordering < ?"
+            % (
+                should_delete_expr,
                 should_delete_expr,
             ),
             should_delete_params,

From e7cd7cb0f02f1f918387e4b3061b4a251c5d68fc Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 13 Sep 2018 12:55:40 +0100
Subject: [PATCH 092/138] Newsfile

---
 changelog.d/3856.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/3856.misc

diff --git a/changelog.d/3856.misc b/changelog.d/3856.misc
new file mode 100644
index 0000000000..36c311eb3d
--- /dev/null
+++ b/changelog.d/3856.misc
@@ -0,0 +1 @@
+Speed up purge history for rooms that have been previously purged

From 9dbe38ea7de66f6e5dfab1570dd3a5217a4883eb Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 13 Sep 2018 15:05:52 +0100
Subject: [PATCH 093/138] Create indices after insertion

---
 synapse/storage/events.py | 32 ++++++++++++++++++--------------
 1 file changed, 18 insertions(+), 14 deletions(-)

diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 1c8e01a47e..622f2ababf 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -1890,20 +1890,6 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
             ")"
         )
 
-        # create an index on should_delete because later we'll be looking for
-        # the should_delete / shouldn't_delete subsets
-        txn.execute(
-            "CREATE INDEX events_to_purge_should_delete"
-            " ON events_to_purge(should_delete)",
-        )
-
-        # We do joins against events_to_purge for e.g. calculating state
-        # groups to purge, etc., so lets make an index.
-        txn.execute(
-            "CREATE INDEX events_to_purge_id"
-            " ON events_to_purge(event_id)",
-        )
-
         # First ensure that we're not about to delete all the forward extremeties
         txn.execute(
             "SELECT e.event_id, e.depth FROM events as e "
@@ -1950,6 +1936,24 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
             ),
             should_delete_params,
         )
+
+        # We create the indices *after* insertion as that's a lot faster.
+
+        # create an index on should_delete because later we'll be looking for
+        # the should_delete / shouldn't_delete subsets
+        txn.execute(
+            "CREATE INDEX events_to_purge_should_delete"
+            " ON events_to_purge(should_delete)",
+        )
+
+        # We do joins against events_to_purge for e.g. calculating state
+        # groups to purge, etc., so lets make an index.
+ txn.execute( + "CREATE INDEX events_to_purge_id" + " ON events_to_purge(event_id)", + ) + + txn.execute( "SELECT event_id, should_delete FROM events_to_purge" ) From 9cbd0094f0dcf03676b3234896e73af80cb75021 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 13 Sep 2018 15:15:35 +0100 Subject: [PATCH 094/138] pep8 --- synapse/storage/events.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 622f2ababf..bc1c18023b 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -1953,7 +1953,6 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore " ON events_to_purge(event_id)", ) - txn.execute( "SELECT event_id, should_delete FROM events_to_purge" ) From bfa0b759e02c74931606ea77f34bec99dfb10589 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 14 Sep 2018 00:15:51 +1000 Subject: [PATCH 095/138] Attempt to figure out what's going on with timeouts (#3857) --- changelog.d/3857.misc | 1 + synapse/http/matrixfederationclient.py | 98 +++++++-------- tests/http/test_fedclient.py | 157 +++++++++++++++++++++++++ tests/server.py | 42 ++++++- 4 files changed, 241 insertions(+), 57 deletions(-) create mode 100644 changelog.d/3857.misc create mode 100644 tests/http/test_fedclient.py diff --git a/changelog.d/3857.misc b/changelog.d/3857.misc new file mode 100644 index 0000000000..e128d193d9 --- /dev/null +++ b/changelog.d/3857.misc @@ -0,0 +1 @@ +Refactor some HTTP timeout code. \ No newline at end of file diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index cf920bc041..c49dbacd93 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -26,7 +26,7 @@ from canonicaljson import encode_canonical_json from prometheus_client import Counter from signedjson.sign import sign_json -from twisted.internet import defer, protocol, reactor +from twisted.internet import defer, protocol from twisted.internet.error import DNSLookupError from twisted.web._newclient import ResponseDone from twisted.web.client import Agent, HTTPConnectionPool @@ -40,10 +40,8 @@ from synapse.api.errors import ( HttpResponseException, SynapseError, ) -from synapse.http import cancelled_to_request_timed_out_error from synapse.http.endpoint import matrix_federation_endpoint from synapse.util import logcontext -from synapse.util.async_helpers import add_timeout_to_deferred from synapse.util.logcontext import make_deferred_yieldable logger = logging.getLogger(__name__) @@ -66,13 +64,14 @@ else: class MatrixFederationEndpointFactory(object): def __init__(self, hs): + self.reactor = hs.get_reactor() self.tls_client_options_factory = hs.tls_client_options_factory def endpointForURI(self, uri): destination = uri.netloc.decode('ascii') return matrix_federation_endpoint( - reactor, destination, timeout=10, + self.reactor, destination, timeout=10, tls_client_options_factory=self.tls_client_options_factory ) @@ -90,6 +89,7 @@ class MatrixFederationHttpClient(object): self.hs = hs self.signing_key = hs.config.signing_key[0] self.server_name = hs.hostname + reactor = hs.get_reactor() pool = HTTPConnectionPool(reactor) pool.maxPersistentPerHost = 5 pool.cachedConnectionTimeout = 2 * 60 @@ -100,6 +100,7 @@ class MatrixFederationHttpClient(object): self._store = hs.get_datastore() self.version_string = hs.version_string.encode('ascii') self._next_id = 1 + self.default_timeout = 60 def _create_url(self, destination, path_bytes, param_bytes, query_bytes): return 
urllib.parse.urlunparse( @@ -143,6 +144,11 @@ class MatrixFederationHttpClient(object): (May also fail with plenty of other Exceptions for things like DNS failures, connection failures, SSL failures.) """ + if timeout: + _sec_timeout = timeout / 1000 + else: + _sec_timeout = self.default_timeout + if ( self.hs.config.federation_domain_whitelist is not None and destination not in self.hs.config.federation_domain_whitelist @@ -215,13 +221,9 @@ class MatrixFederationHttpClient(object): headers=Headers(headers_dict), data=data, agent=self.agent, + reactor=self.hs.get_reactor() ) - add_timeout_to_deferred( - request_deferred, - timeout / 1000. if timeout else 60, - self.hs.get_reactor(), - cancelled_to_request_timed_out_error, - ) + request_deferred.addTimeout(_sec_timeout, self.hs.get_reactor()) response = yield make_deferred_yieldable( request_deferred, ) @@ -261,6 +263,13 @@ class MatrixFederationHttpClient(object): delay = min(delay, 2) delay *= random.uniform(0.8, 1.4) + logger.debug( + "{%s} Waiting %s before sending to %s...", + txn_id, + delay, + destination + ) + yield self.clock.sleep(delay) retries_left -= 1 else: @@ -279,10 +288,9 @@ class MatrixFederationHttpClient(object): # :'( # Update transactions table? with logcontext.PreserveLoggingContext(): - body = yield self._timeout_deferred( - treq.content(response), - timeout, - ) + d = treq.content(response) + d.addTimeout(_sec_timeout, self.hs.get_reactor()) + body = yield make_deferred_yieldable(d) raise HttpResponseException( response.code, response.phrase, body ) @@ -396,10 +404,9 @@ class MatrixFederationHttpClient(object): check_content_type_is_json(response.headers) with logcontext.PreserveLoggingContext(): - body = yield self._timeout_deferred( - treq.json_content(response), - timeout, - ) + d = treq.json_content(response) + d.addTimeout(self.default_timeout, self.hs.get_reactor()) + body = yield make_deferred_yieldable(d) defer.returnValue(body) @defer.inlineCallbacks @@ -449,10 +456,14 @@ class MatrixFederationHttpClient(object): check_content_type_is_json(response.headers) with logcontext.PreserveLoggingContext(): - body = yield self._timeout_deferred( - treq.json_content(response), - timeout, - ) + d = treq.json_content(response) + if timeout: + _sec_timeout = timeout / 1000 + else: + _sec_timeout = self.default_timeout + + d.addTimeout(_sec_timeout, self.hs.get_reactor()) + body = yield make_deferred_yieldable(d) defer.returnValue(body) @@ -504,10 +515,9 @@ class MatrixFederationHttpClient(object): check_content_type_is_json(response.headers) with logcontext.PreserveLoggingContext(): - body = yield self._timeout_deferred( - treq.json_content(response), - timeout, - ) + d = treq.json_content(response) + d.addTimeout(self.default_timeout, self.hs.get_reactor()) + body = yield make_deferred_yieldable(d) defer.returnValue(body) @@ -554,10 +564,9 @@ class MatrixFederationHttpClient(object): check_content_type_is_json(response.headers) with logcontext.PreserveLoggingContext(): - body = yield self._timeout_deferred( - treq.json_content(response), - timeout, - ) + d = treq.json_content(response) + d.addTimeout(self.default_timeout, self.hs.get_reactor()) + body = yield make_deferred_yieldable(d) defer.returnValue(body) @@ -599,38 +608,15 @@ class MatrixFederationHttpClient(object): try: with logcontext.PreserveLoggingContext(): - length = yield self._timeout_deferred( - _readBodyToFile( - response, output_stream, max_size - ), - ) + d = _readBodyToFile(response, output_stream, max_size) + d.addTimeout(self.default_timeout, 
self.hs.get_reactor()) + length = yield make_deferred_yieldable(d) except Exception: logger.exception("Failed to download body") raise defer.returnValue((length, headers)) - def _timeout_deferred(self, deferred, timeout_ms=None): - """Times the deferred out after `timeout_ms` ms - - Args: - deferred (Deferred) - timeout_ms (int|None): Timeout in milliseconds. If None defaults - to 60 seconds. - - Returns: - Deferred - """ - - add_timeout_to_deferred( - deferred, - timeout_ms / 1000. if timeout_ms else 60, - self.hs.get_reactor(), - cancelled_to_request_timed_out_error, - ) - - return deferred - class _ReadBodyToFileProtocol(protocol.Protocol): def __init__(self, stream, deferred, max_size): diff --git a/tests/http/test_fedclient.py b/tests/http/test_fedclient.py new file mode 100644 index 0000000000..1c46c9cfeb --- /dev/null +++ b/tests/http/test_fedclient.py @@ -0,0 +1,157 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from mock import Mock + +from twisted.internet.defer import TimeoutError +from twisted.internet.error import ConnectingCancelledError, DNSLookupError +from twisted.web.client import ResponseNeverReceived + +from synapse.http.matrixfederationclient import MatrixFederationHttpClient + +from tests.unittest import HomeserverTestCase + + +class FederationClientTests(HomeserverTestCase): + def make_homeserver(self, reactor, clock): + + hs = self.setup_test_homeserver(reactor=reactor, clock=clock) + hs.tls_client_options_factory = None + return hs + + def prepare(self, reactor, clock, homeserver): + + self.cl = MatrixFederationHttpClient(self.hs) + self.reactor.lookups["testserv"] = "1.2.3.4" + + def test_dns_error(self): + """ + If the DNS lookup returns an error, it will bubble up. + """ + d = self.cl._request("testserv2:8008", "GET", "foo/bar", timeout=10000) + self.pump() + + f = self.failureResultOf(d) + self.assertIsInstance(f.value, DNSLookupError) + + def test_client_never_connect(self): + """ + If the HTTP request is not connected and is timed out, it'll give a + ConnectingCancelledError. + """ + d = self.cl._request("testserv:8008", "GET", "foo/bar", timeout=10000) + + self.pump() + + # Nothing happened yet + self.assertFalse(d.called) + + # Make sure treq is trying to connect + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + self.assertEqual(clients[0][0], '1.2.3.4') + self.assertEqual(clients[0][1], 8008) + + # Deferred is still without a result + self.assertFalse(d.called) + + # Push by enough to time it out + self.reactor.advance(10.5) + f = self.failureResultOf(d) + + self.assertIsInstance(f.value, ConnectingCancelledError) + + def test_client_connect_no_response(self): + """ + If the HTTP request is connected, but gets no response before being + timed out, it'll give a ResponseNeverReceived. + """ + d = self.cl._request("testserv:8008", "GET", "foo/bar", timeout=10000) + + self.pump() + + # Nothing happened yet + self.assertFalse(d.called) + + # Make sure treq is trying to connect + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + self.assertEqual(clients[0][0], '1.2.3.4') + self.assertEqual(clients[0][1], 8008) + + conn = Mock() + client = clients[0][2].buildProtocol(None) + client.makeConnection(conn) + + # Deferred is still without a result + self.assertFalse(d.called) + + # Push by enough to time it out + self.reactor.advance(10.5) + f = self.failureResultOf(d) + + self.assertIsInstance(f.value, ResponseNeverReceived) + + def test_client_gets_headers(self): + """ + Once the client gets the headers, _request returns successfully. + """ + d = self.cl._request("testserv:8008", "GET", "foo/bar", timeout=10000) + + self.pump() + + conn = Mock() + clients = self.reactor.tcpClients + client = clients[0][2].buildProtocol(None) + client.makeConnection(conn) + + # Deferred does not have a result + self.assertFalse(d.called) + + # Send it the HTTP response + client.dataReceived(b"HTTP/1.1 200 OK\r\nServer: Fake\r\n\r\n") + + # We should get a successful response + r = self.successResultOf(d) + self.assertEqual(r.code, 200) + + def test_client_headers_no_body(self): + """ + If the HTTP request is connected and gets the response headers, but no + body arrives before the timeout, it'll give a TimeoutError. + """ + d = self.cl.post_json("testserv:8008", "foo/bar", timeout=10000) + + self.pump() + + conn = Mock() + clients = self.reactor.tcpClients + client = clients[0][2].buildProtocol(None) + client.makeConnection(conn) + + # Deferred does not have a result + self.assertFalse(d.called) + + # Send it the HTTP response + client.dataReceived( + (b"HTTP/1.1 200 OK\r\nContent-Type: application/json\r\n" + b"Server: Fake\r\n\r\n") + ) + + # Push by enough to time it out + self.reactor.advance(10.5) + f = self.failureResultOf(d) + + self.assertIsInstance(f.value, TimeoutError) diff --git a/tests/server.py b/tests/server.py index a2c3ca61f6..420ec4e088 100644 --- a/tests/server.py +++ b/tests/server.py @@ -4,9 +4,14 @@ from io import BytesIO from six import text_type import attr +from zope.interface import implementer -from twisted.internet import address, threads +from twisted.internet import address, threads, udp +from twisted.internet._resolver import HostResolution +from twisted.internet.address import IPv4Address from twisted.internet.defer import Deferred +from twisted.internet.error import DNSLookupError +from twisted.internet.interfaces import IReactorPluggableNameResolver from twisted.python.failure import Failure from twisted.test.proto_helpers import MemoryReactorClock @@ -154,11 +159,46 @@ def render(request, resource, clock): wait_until_result(clock, request) +@implementer(IReactorPluggableNameResolver) class ThreadedMemoryReactorClock(MemoryReactorClock): """ A MemoryReactorClock that supports callFromThread.
""" + def __init__(self): + self._udp = [] + self.lookups = {} + + class Resolver(object): + def resolveHostName( + _self, + resolutionReceiver, + hostName, + portNumber=0, + addressTypes=None, + transportSemantics='TCP', + ): + + resolution = HostResolution(hostName) + resolutionReceiver.resolutionBegan(resolution) + if hostName not in self.lookups: + raise DNSLookupError("OH NO") + + resolutionReceiver.addressResolved( + IPv4Address('TCP', self.lookups[hostName], portNumber) + ) + resolutionReceiver.resolutionComplete() + return resolution + + self.nameResolver = Resolver() + super(ThreadedMemoryReactorClock, self).__init__() + + def listenUDP(self, port, protocol, interface='', maxPacketSize=8196): + p = udp.Port(port, protocol, interface, maxPacketSize, self) + p.startListening() + self._udp.append(p) + return p + def callFromThread(self, callback, *args, **kwargs): """ Make the callback fire in the next reactor iteration. From 89a76d18898a0172010c25e5a7c66d57c16b9196 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 13 Sep 2018 15:33:16 +0100 Subject: [PATCH 096/138] Fix handling of redacted events from federation If we receive an event that doesn't pass their content hash check (e.g. due to already being redacted) then we hit a bug which causes an exception to be raised, which then promplty stops the event (and request) from being processed. This effects all sorts of federation APIs, including joining rooms with a redacted state event. --- synapse/events/__init__.py | 5 +++++ synapse/federation/federation_base.py | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 51f9084b90..b782af6308 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +import six + from synapse.util.caches import intern_dict from synapse.util.frozenutils import freeze @@ -147,6 +149,9 @@ class EventBase(object): def items(self): return list(self._event_dict.items()) + def keys(self): + return six.iterkeys(self._event_dict) + class FrozenEvent(EventBase): def __init__(self, event_dict, internal_metadata_dict={}, rejected_reason=None): diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 61782ae1c0..b7ad729c63 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -153,7 +153,7 @@ class FederationBase(object): # *actual* redacted copy to be on the safe side.) 
redacted_event = prune_event(pdu) if ( - set(six.iterkeys(redacted_event)) == set(six.iterkeys(pdu)) and + set(redacted_event.keys()) == set(pdu.keys()) and set(six.iterkeys(redacted_event.content)) == set(six.iterkeys(pdu.content)) ): From 3126b88d35994b09096239462abbbbcd17d9cbf0 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 14 Sep 2018 00:44:31 +1000 Subject: [PATCH 097/138] fix circleci merged builds (#3858) * fix * changelog --- .circleci/merge_base_branch.sh | 4 ++++ changelog.d/3858.misc | 1 + 2 files changed, 5 insertions(+) create mode 100644 changelog.d/3858.misc diff --git a/.circleci/merge_base_branch.sh b/.circleci/merge_base_branch.sh index 4e297d77da..9614eb91b6 100755 --- a/.circleci/merge_base_branch.sh +++ b/.circleci/merge_base_branch.sh @@ -19,6 +19,10 @@ GITBASE=`curl -q https://api.github.com/repos/matrix-org/synapse/pulls/${CIRCLE_ # Show what we are before git show -s +# Set up username so it can do a merge +git config --global user.email bot@matrix.org +git config --global user.name "A robot" + # Fetch and merge. If it doesn't work, it will raise due to set -e. git fetch -u origin $GITBASE git merge --no-edit origin/$GITBASE diff --git a/changelog.d/3858.misc b/changelog.d/3858.misc new file mode 100644 index 0000000000..4644db5330 --- /dev/null +++ b/changelog.d/3858.misc @@ -0,0 +1 @@ +Fix running merged builds on CircleCI \ No newline at end of file From 13193a6e2bd3557cae89e34438e8598f75276c34 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 13 Sep 2018 15:46:45 +0100 Subject: [PATCH 098/138] Newsfile --- changelog.d/3859.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3859.bugfix diff --git a/changelog.d/3859.bugfix b/changelog.d/3859.bugfix new file mode 100644 index 0000000000..ec5b172464 --- /dev/null +++ b/changelog.d/3859.bugfix @@ -0,0 +1 @@ +Fix handling of redacted events from federation From ed5331a6272280efd4fc98ae425711acec8c6be5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 13 Sep 2018 16:10:56 +0100 Subject: [PATCH 099/138] comment --- synapse/storage/events.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index bc1c18023b..e7487311ce 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -1916,6 +1916,8 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore should_delete_params = () if not delete_local_events: should_delete_expr += " AND event_id NOT LIKE ?" + + # We include the parameter twice since we use the expression twice should_delete_params += ( "%:" + self.hs.hostname, "%:" + self.hs.hostname, From 1c3f4d9ca53b3e3a5ef1849a8da68c6d8cf82938 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 14 Sep 2018 03:09:13 +1000 Subject: [PATCH 100/138] buffer? 
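treq's defaults are part of the problem this run of patches is chasing: responses are buffered to completion before being handed back, and (until a couple of patches later, where pool.retryAutomatically is also switched off) the connection pool can retry requests on its own; the changelog entry for this work (3872.misc, later in the series) summarises it as disabling buffering and automatic retrying to prevent timeouts. A rough standalone sketch of the resulting call shape — the URL and the ten-second timeout here are invented for illustration, not taken from this patch:

    from twisted.internet.task import react
    import treq

    def main(reactor):
        d = treq.request(
            "GET",
            "https://example.com/_matrix/federation/v1/version",
            reactor=reactor,
            unbuffered=True,  # hand the response back on headers; the body streams afterwards
        )
        d.addTimeout(10, reactor)  # seconds, as with the addTimeout calls above
        d.addCallback(treq.content)  # reading the body is now an explicit, separate step
        return d

    react(main)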
--- synapse/http/matrixfederationclient.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index c49dbacd93..cdbe27eb37 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -221,7 +221,8 @@ class MatrixFederationHttpClient(object): headers=Headers(headers_dict), data=data, agent=self.agent, - reactor=self.hs.get_reactor() + reactor=self.hs.get_reactor(), + unbuffered=True ) request_deferred.addTimeout(_sec_timeout, self.hs.get_reactor()) response = yield make_deferred_yieldable( From 7c27c4d51cfd2a3d6504889b1ccdddabf38ae05c Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 14 Sep 2018 03:11:11 +1000 Subject: [PATCH 101/138] merge (#3576) --- .circleci/config.yml | 17 +++++--- .travis.yml | 3 ++ changelog.d/3576.feature | 1 + synapse/http/client.py | 14 +++++-- synapse/push/httppusher.py | 7 +++- synapse/push/mailer.py | 7 ++-- synapse/replication/slave/storage/devices.py | 23 ++++++---- tox.ini | 44 ++------------------ 8 files changed, 55 insertions(+), 61 deletions(-) create mode 100644 changelog.d/3576.feature diff --git a/.circleci/config.yml b/.circleci/config.yml index 5266544f3c..605430fb3f 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -53,7 +53,7 @@ jobs: steps: - checkout - run: docker pull matrixdotorg/sytest-synapsepy3 - - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs hawkowl/sytestpy3 + - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy3 - store_artifacts: path: ~/project/logs destination: logs @@ -76,7 +76,7 @@ jobs: - checkout - run: bash .circleci/merge_base_branch.sh - run: docker pull matrixdotorg/sytest-synapsepy3 - - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs hawkowl/sytestpy3 + - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy3 - store_artifacts: path: ~/project/logs destination: logs @@ -101,6 +101,8 @@ workflows: jobs: - sytestpy2 - sytestpy2postgres + - sytestpy3 + - sytestpy3postgres - sytestpy2merged: filters: branches: @@ -109,6 +111,11 @@ workflows: filters: branches: ignore: /develop|master/ -# Currently broken while the Python 3 port is incomplete -# - sytestpy3 -# - sytestpy3postgres + - sytestpy3merged: + filters: + branches: + ignore: /develop|master/ + - sytestpy3postgresmerged: + filters: + branches: + ignore: /develop|master/ diff --git a/.travis.yml b/.travis.yml index ebc972ed24..b3ee66da8f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -25,6 +25,9 @@ matrix: services: - postgresql + - python: 3.5 + env: TOX_ENV=py35 + - python: 3.6 env: TOX_ENV=py36 diff --git a/changelog.d/3576.feature b/changelog.d/3576.feature new file mode 100644 index 0000000000..02a10e370d --- /dev/null +++ b/changelog.d/3576.feature @@ -0,0 +1 @@ +Python 3.5+ is now supported. 
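The synapse/http/client.py hunks that follow are typical of the Python 3 work in this patch: Twisted hands back raw header names and values as byte strings, so membership tests need byte keys and numeric values need an explicit conversion before they can be compared. A standalone illustration of the pattern (the header value here is made up):

    from twisted.web.http_headers import Headers

    headers = Headers({b"Content-Length": [b"1048576"]})
    resp_headers = dict(headers.getAllRawHeaders())

    max_size = 10 * 1024 * 1024
    # getAllRawHeaders() yields a *list* of values per header name, hence the [0]
    if (b"Content-Length" in resp_headers
            and int(resp_headers[b"Content-Length"][0]) > max_size):
        raise Exception("Requested URL is too large")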
diff --git a/synapse/http/client.py b/synapse/http/client.py index 4ba54fed05..d60f87b04e 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -348,7 +348,8 @@ class SimpleHttpClient(object): resp_headers = dict(response.headers.getAllRawHeaders()) - if 'Content-Length' in resp_headers and resp_headers['Content-Length'] > max_size: + if (b'Content-Length' in resp_headers and + int(resp_headers[b'Content-Length']) > max_size): logger.warn("Requested URL is too large > %r bytes" % (self.max_size,)) raise SynapseError( 502, @@ -381,7 +382,12 @@ class SimpleHttpClient(object): ) defer.returnValue( - (length, resp_headers, response.request.absoluteURI, response.code), + ( + length, + resp_headers, + response.request.absoluteURI.decode('ascii'), + response.code, + ), ) @@ -466,9 +472,9 @@ class SpiderEndpointFactory(object): def endpointForURI(self, uri): logger.info("Getting endpoint for %s", uri.toBytes()) - if uri.scheme == "http": + if uri.scheme == b"http": endpoint_factory = HostnameEndpoint - elif uri.scheme == "https": + elif uri.scheme == b"https": tlsCreator = self.policyForHTTPS.creatorForNetloc(uri.host, uri.port) def endpoint_factory(reactor, host, port, **kw): diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 81e18bcf7d..48abd5e4d6 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -15,6 +15,8 @@ # limitations under the License. import logging +import six + from prometheus_client import Counter from twisted.internet import defer @@ -26,6 +28,9 @@ from synapse.util.metrics import Measure from . import push_rule_evaluator, push_tools +if six.PY3: + long = int + logger = logging.getLogger(__name__) http_push_processed_counter = Counter("synapse_http_httppusher_http_pushes_processed", "") @@ -96,7 +101,7 @@ class HttpPusher(object): @defer.inlineCallbacks def on_new_notifications(self, min_stream_ordering, max_stream_ordering): - self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering) + self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering or 0) yield self._process() @defer.inlineCallbacks diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index bfa6df7b68..b78ce10396 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -17,10 +17,11 @@ import email.mime.multipart import email.utils import logging import time -import urllib from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText +from six.moves import urllib + import bleach import jinja2 @@ -474,7 +475,7 @@ class Mailer(object): # XXX: make r0 once API is stable return "%s_matrix/client/unstable/pushers/remove?%s" % ( self.hs.config.public_baseurl, - urllib.urlencode(params), + urllib.parse.urlencode(params), ) @@ -561,7 +562,7 @@ def _create_mxc_to_http_filter(config): return "%s_matrix/media/v1/thumbnail/%s?%s%s" % ( config.public_baseurl, serverAndMediaId, - urllib.urlencode(params), + urllib.parse.urlencode(params), fragment or "", ) diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py index 8206a988f7..21b8c468fa 100644 --- a/synapse/replication/slave/storage/devices.py +++ b/synapse/replication/slave/storage/devices.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import six + from synapse.storage import DataStore from synapse.storage.end_to_end_keys import EndToEndKeyStore from synapse.util.caches.stream_change_cache import StreamChangeCache @@ -21,6 +23,13 @@ from ._base import BaseSlavedStore from ._slaved_id_tracker import SlavedIdTracker +def __func__(inp): + if six.PY3: + return inp + else: + return inp.__func__ + + class SlavedDeviceStore(BaseSlavedStore): def __init__(self, db_conn, hs): super(SlavedDeviceStore, self).__init__(db_conn, hs) @@ -38,14 +47,14 @@ class SlavedDeviceStore(BaseSlavedStore): "DeviceListFederationStreamChangeCache", device_list_max, ) - get_device_stream_token = DataStore.get_device_stream_token.__func__ - get_user_whose_devices_changed = DataStore.get_user_whose_devices_changed.__func__ - get_devices_by_remote = DataStore.get_devices_by_remote.__func__ - _get_devices_by_remote_txn = DataStore._get_devices_by_remote_txn.__func__ - _get_e2e_device_keys_txn = DataStore._get_e2e_device_keys_txn.__func__ - mark_as_sent_devices_by_remote = DataStore.mark_as_sent_devices_by_remote.__func__ + get_device_stream_token = __func__(DataStore.get_device_stream_token) + get_user_whose_devices_changed = __func__(DataStore.get_user_whose_devices_changed) + get_devices_by_remote = __func__(DataStore.get_devices_by_remote) + _get_devices_by_remote_txn = __func__(DataStore._get_devices_by_remote_txn) + _get_e2e_device_keys_txn = __func__(DataStore._get_e2e_device_keys_txn) + mark_as_sent_devices_by_remote = __func__(DataStore.mark_as_sent_devices_by_remote) _mark_as_sent_devices_by_remote_txn = ( - DataStore._mark_as_sent_devices_by_remote_txn.__func__ + __func__(DataStore._mark_as_sent_devices_by_remote_txn) ) count_e2e_one_time_keys = EndToEndKeyStore.__dict__["count_e2e_one_time_keys"] diff --git a/tox.ini b/tox.ini index 085f438989..80ac9324df 100644 --- a/tox.ini +++ b/tox.ini @@ -64,49 +64,11 @@ setenv = {[base]setenv} SYNAPSE_POSTGRES = 1 +[testenv:py35] +usedevelop=true + [testenv:py36] usedevelop=true -commands = - /usr/bin/find "{toxinidir}" -name '*.pyc' -delete - coverage run {env:COVERAGE_OPTS:} --source="{toxinidir}/synapse" \ - "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests/config \ - tests/api/test_filtering.py \ - tests/api/test_ratelimiting.py \ - tests/appservice \ - tests/crypto \ - tests/events \ - tests/handlers/test_appservice.py \ - tests/handlers/test_auth.py \ - tests/handlers/test_device.py \ - tests/handlers/test_directory.py \ - tests/handlers/test_e2e_keys.py \ - tests/handlers/test_presence.py \ - tests/handlers/test_profile.py \ - tests/handlers/test_register.py \ - tests/replication/slave/storage/test_account_data.py \ - tests/replication/slave/storage/test_receipts.py \ - tests/storage/test_appservice.py \ - tests/storage/test_background_update.py \ - tests/storage/test_base.py \ - tests/storage/test__base.py \ - tests/storage/test_client_ips.py \ - tests/storage/test_devices.py \ - tests/storage/test_end_to_end_keys.py \ - tests/storage/test_event_push_actions.py \ - tests/storage/test_keys.py \ - tests/storage/test_presence.py \ - tests/storage/test_profile.py \ - tests/storage/test_registration.py \ - tests/storage/test_room.py \ - tests/storage/test_user_directory.py \ - tests/test_distributor.py \ - tests/test_dns.py \ - tests/test_preview.py \ - tests/test_test_utils.py \ - tests/test_types.py \ - tests/util} \ - {env:TOXSUFFIX:} - {env:DUMP_COVERAGE_COMMAND:coverage report -m} [testenv:packaging] deps = From 63755fa4c25f120941aba24d6192815313d12ea7 Mon Sep 17 00:00:00 2001 From: Amber 
Brown Date: Fri, 14 Sep 2018 03:21:47 +1000 Subject: [PATCH 102/138] we do that higher up --- synapse/http/matrixfederationclient.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index cdbe27eb37..d1a6e87706 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -91,6 +91,7 @@ class MatrixFederationHttpClient(object): self.server_name = hs.hostname reactor = hs.get_reactor() pool = HTTPConnectionPool(reactor) + pool.retryAutomatically = False pool.maxPersistentPerHost = 5 pool.cachedConnectionTimeout = 2 * 60 self.agent = Agent.usingEndpointFactory( From 7c33ab76da91a747fd442f65c9bfe187a81864f7 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 14 Sep 2018 03:45:34 +1000 Subject: [PATCH 103/138] redact better --- synapse/http/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/http/__init__.py b/synapse/http/__init__.py index 58ef8d3ce4..a3f9e4f67c 100644 --- a/synapse/http/__init__.py +++ b/synapse/http/__init__.py @@ -38,12 +38,12 @@ def cancelled_to_request_timed_out_error(value, timeout): return value -ACCESS_TOKEN_RE = re.compile(br'(\?.*access(_|%5[Ff])token=)[^&]*(.*)$') +ACCESS_TOKEN_RE = re.compile(r'(\?.*access(_|%5[Ff])token=)[^&]*(.*)$') def redact_uri(uri): """Strips access tokens from the uri replaces with <redacted> """ return ACCESS_TOKEN_RE.sub( - br'\1<redacted>\3', + r'\1<redacted>\3', uri ) From f1a72646636c08e5e0f6dc6f583d0bffebf94c85 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Thu, 13 Sep 2018 11:51:12 -0600 Subject: [PATCH 104/138] Fix minor typo in exception --- synapse/replication/tcp/streams.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/replication/tcp/streams.py b/synapse/replication/tcp/streams.py index 55fe701c5c..c1e626be3f 100644 --- a/synapse/replication/tcp/streams.py +++ b/synapse/replication/tcp/streams.py @@ -196,7 +196,7 @@ class Stream(object): ) if len(rows) >= MAX_EVENTS_BEHIND: - raise Exception("stream %s has fallen behined" % (self.NAME)) + raise Exception("stream %s has fallen behind" % (self.NAME)) else: rows = yield self.update_function( from_token, current_token, From 8f08d848f53bc79cdd47f8d4025313268a16b227 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 14 Sep 2018 03:53:56 +1000 Subject: [PATCH 105/138] fix --- synapse/http/site.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/http/site.py b/synapse/http/site.py index f0828c6542..18f2d37e00 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -85,7 +85,7 @@ class SynapseRequest(Request): return "%s-%i" % (self.method, self.request_seq) def get_redacted_uri(self): - return redact_uri(self.uri) + return redact_uri(self.uri.decode('ascii')) def get_user_agent(self): return self.requestHeaders.getRawHeaders(b"User-Agent", [None])[-1] From c971aa7b9d5b4d3900404a285f34dd5004a76f37 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 14 Sep 2018 03:57:02 +1000 Subject: [PATCH 106/138] fix --- synapse/http/site.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/synapse/http/site.py b/synapse/http/site.py index 18f2d37e00..e1e53e8ae5 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -85,7 +85,10 @@ class SynapseRequest(Request): return "%s-%i" % (self.method, self.request_seq) def get_redacted_uri(self): - return redact_uri(self.uri.decode('ascii')) + uri = self.uri + if isinstance(uri, bytes): + uri = self.uri.decode('ascii') + return
redact_uri(uri) def get_user_agent(self): return self.requestHeaders.getRawHeaders(b"User-Agent", [None])[-1] From 984db8bb0858757884c0f742bdaa27ae044d2c85 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Thu, 13 Sep 2018 12:06:04 -0600 Subject: [PATCH 107/138] Create 3860.misc --- changelog.d/3860.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3860.misc diff --git a/changelog.d/3860.misc b/changelog.d/3860.misc new file mode 100644 index 0000000000..364239d3e3 --- /dev/null +++ b/changelog.d/3860.misc @@ -0,0 +1 @@ +Fix typo in replication stream exception. From 0a81038ea0e308d2944b7deae43060e4982b0abe Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 14 Sep 2018 14:39:59 +0100 Subject: [PATCH 108/138] Add in flight real time metrics for Measure blocks --- synapse/metrics/__init__.py | 110 +++++++++++++++++++++++++++++++++++- synapse/util/metrics.py | 22 ++++++++ 2 files changed, 131 insertions(+), 1 deletion(-) diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 550f8443f7..98333b2048 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -18,8 +18,11 @@ import gc import logging import os import platform +import threading import time +import six + import attr from prometheus_client import Counter, Gauge, Histogram from prometheus_client.core import REGISTRY, GaugeMetricFamily @@ -68,7 +71,7 @@ class LaterGauge(object): return if isinstance(calls, dict): - for k, v in calls.items(): + for k, v in six.iteritems(calls): g.add_metric(k, v) else: g.add_metric([], calls) @@ -87,6 +90,111 @@ class LaterGauge(object): all_gauges[self.name] = self +class InFlightGauge(object): + """Tracks number of things (e.g. requests, Measure blocks, etc) in flight + at any given time. + + Each InFlightGauge will create a metric called `<name>_total` that counts + the number of in flight blocks, as well as a metric for each item in the + given `sub_metrics` as `<name>_<sub_metric>` which will get updated by the + callbacks. + + Args: + name (str) + desc (str) + labels (list[str]) + sub_metrics (list[str]): A list of sub metrics that the callbacks + will update. + """ + + # TODO: Expand this to + + def __init__(self, name, desc, labels, sub_metrics): + self.name = name + self.desc = desc + self.labels = labels + self.sub_metrics = sub_metrics + + # Create a class which has the sub_metrics values as attributes, which + # default to 0 on initialization. Used to pass to registered callbacks. + self._metrics_class = attr.make_class( + "_MetricsEntry", + attrs={x: attr.ib(0) for x in sub_metrics}, + slots=True, + ) + + # Counts number of in flight blocks for a given set of label values + self._registrations = {} + + # Protects access to _registrations + self._lock = threading.Lock() + + self._register_with_collector() + + def register(self, key, callback): + """Registers that we've entered a new block with labels `key`. + + `callback` gets called each time the metrics are collected. The same + value must also be given to `unregister`. + + `callback` gets called with an object that has an attribute per + sub_metric, which should be updated with the necessary values. Note that + the metrics object is shared between all callbacks registered with the + same key. + + Note that `callback` may be called on a separate thread. + """ + with self._lock: + self._registrations.setdefault(key, set()).add(callback) + + def unregister(self, key, callback): + """Registers that we've exited a block with labels `key`.
+ """ + + with self._lock: + self._registrations.setdefault(key, set()).discard(callback) + + def collect(self): + """Called by prometheus client when it reads metrics. + + Note: may be called by a separate thread. + """ + in_flight = GaugeMetricFamily(self.name + "_total", self.desc, labels=self.labels) + + metrics_by_key = {} + + # We copy so that we don't mutate the list while iterating + with self._lock: + keys = list(self._registrations) + + for key in keys: + with self._lock: + callbacks = set(self._registrations[key]) + + in_flight.add_metric(key, len(callbacks)) + + metrics = self._metrics_class() + metrics_by_key[key] = metrics + for callback in callbacks: + callback(metrics) + + yield in_flight + + for name in self.sub_metrics: + gauge = GaugeMetricFamily("_".join([self.name, name]), "", labels=self.labels) + for key, metrics in six.iteritems(metrics_by_key): + gauge.add_metric(key, getattr(metrics, name)) + yield gauge + + def _register_with_collector(self): + if self.name in all_gauges.keys(): + logger.warning("%s already registered, reregistering" % (self.name,)) + REGISTRY.unregister(all_gauges.pop(self.name)) + + REGISTRY.register(self) + all_gauges[self.name] = self + + # # Detailed CPU metrics # diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index 97f1267380..4b4ac5f6c7 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -20,6 +20,7 @@ from prometheus_client import Counter from twisted.internet import defer +from synapse.metrics import InFlightGauge from synapse.util.logcontext import LoggingContext logger = logging.getLogger(__name__) @@ -45,6 +46,13 @@ block_db_txn_duration = Counter( block_db_sched_duration = Counter( "synapse_util_metrics_block_db_sched_duration_seconds", "", ["block_name"]) +# Tracks the number of blocks currently active +in_flight = InFlightGauge( + "synapse_util_metrics_block_in_flight", "", + labels=["block_name"], + sub_metrics=["real_time_max", "real_time_sum"], +) + def measure_func(name): def wrapper(func): @@ -82,10 +90,14 @@ class Measure(object): self.start_usage = self.start_context.get_resource_usage() + in_flight.register((self.name,), self._update_in_flight) + def __exit__(self, exc_type, exc_val, exc_tb): if isinstance(exc_type, Exception) or not self.start_context: return + in_flight.unregister((self.name,), self._update_in_flight) + duration = self.clock.time() - self.start block_counter.labels(self.name).inc() @@ -120,3 +132,13 @@ class Measure(object): if self.created_context: self.start_context.__exit__(exc_type, exc_val, exc_tb) + + def _update_in_flight(self, metrics): + """Gets called when processing in flight metrics + """ + duration = self.clock.time() - self.start + + metrics.real_time_max = max(metrics.real_time_max, duration) + metrics.real_time_sum += duration + + # TODO: Add other in flight metrics. From f6e82dcddb49a6e6118d85e54655f5adaf1eda4e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 14 Sep 2018 15:08:22 +0100 Subject: [PATCH 109/138] Tests --- tests/test_metrics.py | 81 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 81 insertions(+) create mode 100644 tests/test_metrics.py diff --git a/tests/test_metrics.py b/tests/test_metrics.py new file mode 100644 index 0000000000..17897711a1 --- /dev/null +++ b/tests/test_metrics.py @@ -0,0 +1,81 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from synapse.metrics import InFlightGauge + +from tests import unittest + + +class TestMauLimit(unittest.TestCase): + def test_basic(self): + gauge = InFlightGauge( + "test1", "", + labels=["test_label"], + sub_metrics=["foo", "bar"], + ) + + def handle1(metrics): + metrics.foo += 2 + metrics.bar = max(metrics.bar, 5) + + def handle2(metrics): + metrics.foo += 3 + metrics.bar = max(metrics.bar, 7) + + gauge.register(("key1",), handle1) + + self.assert_dict({ + "test1_total": {("key1",): 1}, + "test1_foo": {("key1",): 2}, + "test1_bar": {("key1",): 5}, + }, self.get_metrics_from_gauge(gauge)) + + gauge.unregister(("key1",), handle1) + + self.assert_dict({ + "test1_total": {("key1",): 0}, + "test1_foo": {("key1",): 0}, + "test1_bar": {("key1",): 0}, + }, self.get_metrics_from_gauge(gauge)) + + gauge.register(("key1",), handle1) + gauge.register(("key2",), handle2) + + self.assert_dict({ + "test1_total": {("key1",): 1, ("key2",): 1}, + "test1_foo": {("key1",): 2, ("key2",): 3}, + "test1_bar": {("key1",): 5, ("key2",): 7}, + }, self.get_metrics_from_gauge(gauge)) + + gauge.unregister(("key2",), handle2) + gauge.register(("key1",), handle2) + + self.assert_dict({ + "test1_total": {("key1",): 2, ("key2",): 0}, + "test1_foo": {("key1",): 5, ("key2",): 0}, + "test1_bar": {("key1",): 7, ("key2",): 0}, + }, self.get_metrics_from_gauge(gauge)) + + def get_metrics_from_gauge(self, gauge): + results = {} + + for r in gauge.collect(): + results[r.name] = { + tuple(labels[x] for x in gauge.labels): value + for _, labels, value in r.samples + } + + return results From 941ac0f0856729087bd28d9243964c35147fcb7b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 14 Sep 2018 14:41:16 +0100 Subject: [PATCH 110/138] Newsfile --- changelog.d/3871.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3871.misc diff --git a/changelog.d/3871.misc b/changelog.d/3871.misc new file mode 100644 index 0000000000..dd9510ceb6 --- /dev/null +++ b/changelog.d/3871.misc @@ -0,0 +1 @@ +Add in flight real time metrics for Measure blocks From 9e2f9a7b57a8c4d2238684a9ccd6145948229e2c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 14 Sep 2018 15:11:26 +0100 Subject: [PATCH 111/138] Measure outbound requests --- synapse/http/matrixfederationclient.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index c49dbacd93..c3542b9353 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -43,6 +43,7 @@ from synapse.api.errors import ( from synapse.http.endpoint import matrix_federation_endpoint from synapse.util import logcontext from synapse.util.logcontext import make_deferred_yieldable +from synapse.util.metrics import Measure logger = logging.getLogger(__name__) outbound_logger = logging.getLogger("synapse.http.outbound") @@ -224,9 +225,11 @@ class MatrixFederationHttpClient(object): reactor=self.hs.get_reactor() ) request_deferred.addTimeout(_sec_timeout, self.hs.get_reactor()) - response = yield make_deferred_yieldable( - request_deferred, 
- ) + + with Measure(self.clock, "outbound_request"): + response = yield make_deferred_yieldable( + request_deferred, + ) log_result = "%d %s" % (response.code, response.phrase,) break From d0f6c1ce2189693db40a385f9ff19bdbbc5c87cf Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 14 Sep 2018 15:12:36 +0100 Subject: [PATCH 112/138] Remove spurious comment --- synapse/metrics/__init__.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 98333b2048..59900aa5d1 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -107,8 +107,6 @@ class InFlightGauge(object): will update. """ - # TODO: Expand this to - def __init__(self, name, desc, labels, sub_metrics): self.name = name self.desc = desc From 90f8e606e2f6869dac20bc4481b9e8c62d32c534 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Sat, 15 Sep 2018 00:23:55 +1000 Subject: [PATCH 113/138] changelog --- changelog.d/3872.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3872.misc diff --git a/changelog.d/3872.misc b/changelog.d/3872.misc new file mode 100644 index 0000000000..b450c506d8 --- /dev/null +++ b/changelog.d/3872.misc @@ -0,0 +1 @@ +Disable buffering and automatic retrying in treq requests to prevent timeouts. \ No newline at end of file From bc9af88a2d971cd75aad419c60e13b0778640b28 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Sat, 15 Sep 2018 00:26:00 +1000 Subject: [PATCH 114/138] fix --- synapse/http/client.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/http/client.py b/synapse/http/client.py index d60f87b04e..ec339a92ad 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -93,7 +93,7 @@ class SimpleHttpClient(object): outgoing_requests_counter.labels(method).inc() # log request but strip `access_token` (AS requests for example include this) - logger.info("Sending request %s %s", method, redact_uri(uri.encode('ascii'))) + logger.info("Sending request %s %s", method, redact_uri(uri)) try: request_deferred = treq.request( @@ -108,14 +108,14 @@ class SimpleHttpClient(object): incoming_responses_counter.labels(method, response.code).inc() logger.info( "Received response to %s %s: %s", - method, redact_uri(uri.encode('ascii')), response.code + method, redact_uri(uri), response.code ) defer.returnValue(response) except Exception as e: incoming_responses_counter.labels(method, "ERR").inc() logger.info( "Error sending request to %s %s: %s %s", - method, redact_uri(uri.encode('ascii')), type(e).__name__, e.args[0] + method, redact_uri(uri), type(e).__name__, e.args[0] ) raise From 024be6cf1891fc20baac2356246595dc7f8ba5c5 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Fri, 14 Sep 2018 18:12:52 +0100 Subject: [PATCH 115/138] don't filter membership events based on history visibility (#3874) don't filter membership events based on history visibility as we will already have filtered the messages in the timeline, and state events are always visible. and because @erikjohnston said so. 
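In code terms, the state block in the pagination handler reduces to the following shape after this change (a condensed sketch of the hunk below, not a verbatim quote):

    if state_ids:
        state = yield self.store.get_events(list(state_ids.values()))
        # state events are always visible, so no filter_events_for_client pass
        state = state.values()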
--- changelog.d/3874.bugfix | 0 synapse/handlers/pagination.py | 9 +-------- 2 files changed, 1 insertion(+), 8 deletions(-) create mode 100644 changelog.d/3874.bugfix diff --git a/changelog.d/3874.bugfix b/changelog.d/3874.bugfix new file mode 100644 index 0000000000..e69de29bb2 diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 5170d093e3..a155b6e938 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -269,14 +269,7 @@ class PaginationHandler(object): if state_ids: state = yield self.store.get_events(list(state_ids.values())) - - if state: - state = yield filter_events_for_client( - self.store, - user_id, - state.values(), - is_peeking=(member_event_id is None), - ) + state = state.values() time_now = self.clock.time_msec() From fcfe7a850dada2e65982f2bc805d8bc409f07512 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 14 Sep 2018 19:23:07 +0100 Subject: [PATCH 116/138] Add an awful secondary timeout to fix wedged requests This is an attempt to mitigate #3842 by adding yet-another-timeout --- synapse/http/matrixfederationclient.py | 11 ++++++ synapse/util/async_helpers.py | 51 ++++++++++++++++++++++++++ 2 files changed, 62 insertions(+) diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index da16b5dd8c..13b19f7626 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -42,6 +42,7 @@ from synapse.api.errors import ( ) from synapse.http.endpoint import matrix_federation_endpoint from synapse.util import logcontext +from synapse.util.async_helpers import timeout_no_seriously from synapse.util.logcontext import make_deferred_yieldable from synapse.util.metrics import Measure @@ -228,6 +229,16 @@ class MatrixFederationHttpClient(object): ) request_deferred.addTimeout(_sec_timeout, self.hs.get_reactor()) + # Sometimes the timeout above doesn't work, so lets hack yet + # another layer of timeouts in in the vain hope that at some + # point the world made sense and this really really really + # should work. + request_deferred = timeout_no_seriously( + request_deferred, + timeout=_sec_timeout * 2, + reactor=self.hs.get_reactor(), + ) + with Measure(self.clock, "outbound_request"): response = yield make_deferred_yieldable( request_deferred, diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 9b3f2f4b96..083e4f4128 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -438,3 +438,54 @@ def _cancelled_to_timed_out_error(value, timeout): value.trap(CancelledError) raise DeferredTimeoutError(timeout, "Deferred") return value + + +def timeout_no_seriously(deferred, timeout, reactor): + """The in build twisted deferred addTimeout (and the method above) + completely fail to time things out under some unknown circumstances. + + Lets try a different way of timing things out and maybe that will make + things work?! + + TODO: Kill this with fire. 
+ """ + + new_d = defer.Deferred() + + timed_out = [False] + + def time_it_out(): + timed_out[0] = True + deferred.cancel() + + if not new_d.called: + new_d.errback(DeferredTimeoutError(timeout, "Deferred")) + + delayed_call = reactor.callLater(timeout, time_it_out) + + def convert_cancelled(value): + if timed_out[0]: + return _cancelled_to_timed_out_error(value, timeout) + return value + + deferred.addBoth(convert_cancelled) + + def cancel_timeout(result): + # stop the pending call to cancel the deferred if it's been fired + if delayed_call.active(): + delayed_call.cancel() + return result + + deferred.addBoth(cancel_timeout) + + def success_cb(val): + if not new_d.called: + new_d.callback(val) + + def failure_cb(val): + if not new_d.called: + new_d.errback(val) + + deferred.addCallbacks(success_cb, failure_cb) + + return new_d From 335b23a078b416b3ea97d1bef73da6ca5abd8d4b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 14 Sep 2018 19:25:58 +0100 Subject: [PATCH 117/138] Newsfile --- changelog.d/3875.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3875.bugfix diff --git a/changelog.d/3875.bugfix b/changelog.d/3875.bugfix new file mode 100644 index 0000000000..2d2147dd4b --- /dev/null +++ b/changelog.d/3875.bugfix @@ -0,0 +1 @@ +Mitigate outbound federation randomly becoming wedged From 24efb2a70dfd8aa100507612836601991e11affc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Sat, 15 Sep 2018 11:38:39 +0100 Subject: [PATCH 118/138] Fix timeout function Turns out deferred.cancel sometimes throws, so we do that last to ensure that we always do resolve the new deferred. --- synapse/util/async_helpers.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 083e4f4128..40c2946129 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -456,11 +456,12 @@ def timeout_no_seriously(deferred, timeout, reactor): def time_it_out(): timed_out[0] = True - deferred.cancel() if not new_d.called: new_d.errback(DeferredTimeoutError(timeout, "Deferred")) + deferred.cancel() + delayed_call = reactor.callLater(timeout, time_it_out) def convert_cancelled(value): From c8642720c9baf16ac434e2cb1b88e0805f2eb00f Mon Sep 17 00:00:00 2001 From: Vincent Breitmoser Date: Sat, 15 Sep 2018 22:03:27 +0200 Subject: [PATCH 119/138] mention libjemalloc in readme (#3877) --- README.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/README.rst b/README.rst index 2471619706..cfcf8b5219 100644 --- a/README.rst +++ b/README.rst @@ -963,5 +963,13 @@ variable. The default is 0.5, which can be decreased to reduce RAM usage in memory constrained enviroments, or increased if performance starts to degrade. +Using `libjemalloc `_ can also yield a significant +improvement in overall amount, and especially in terms of giving back RAM +to the OS. To use it, the library must simply be put in the LD_PRELOAD +environment variable when launching Synapse. On Debian, this can be done +by installing the ``libjemalloc1`` package and adding this line to +``/etc/default/matrix-synaspse``:: + + LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1 .. 
_`key_management`: https://matrix.org/docs/spec/server_server/unstable.html#retrieving-server-keys From 9d13ff4da8483e4fdd66dd16e8ad4ac299f10e43 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sat, 15 Sep 2018 21:04:10 +0100 Subject: [PATCH 120/138] missing changelog --- changelog.d/3877.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3877.misc diff --git a/changelog.d/3877.misc b/changelog.d/3877.misc new file mode 100644 index 0000000000..a80fec4bd8 --- /dev/null +++ b/changelog.d/3877.misc @@ -0,0 +1 @@ +mention jemalloc in the README From d42d79e3c3cc229fa6bb268a97edde4ac81e2df5 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sat, 15 Sep 2018 22:27:41 +0100 Subject: [PATCH 121/138] don't ratelimit autojoins --- synapse/handlers/register.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 1e53f2c635..da914c46ff 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -534,4 +534,5 @@ class RegistrationHandler(BaseHandler): room_id=room_id, remote_room_hosts=remote_room_hosts, action="join", + ratelimit=False, ) From c71b93f2a4bec7d12ce905388336e897a5227b35 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sat, 15 Sep 2018 22:28:28 +0100 Subject: [PATCH 122/138] changelog --- changelog.d/3879.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3879.bugfix diff --git a/changelog.d/3879.bugfix b/changelog.d/3879.bugfix new file mode 100644 index 0000000000..82cb2a8ebc --- /dev/null +++ b/changelog.d/3879.bugfix @@ -0,0 +1 @@ +Don't ratelimit autojoins From 9c749a6b612d538a0d8374152ff358508f733734 Mon Sep 17 00:00:00 2001 From: Simon Dwyer Date: Sun, 16 Sep 2018 10:22:27 +1000 Subject: [PATCH 123/138] Added 'MAX_UPLOAD_SIZE' variable and set default to "10M" --- docker/conf/homeserver.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/conf/homeserver.yaml b/docker/conf/homeserver.yaml index 6bc25bb45f..cfe88788f2 100644 --- a/docker/conf/homeserver.yaml +++ b/docker/conf/homeserver.yaml @@ -85,7 +85,7 @@ federation_rc_concurrent: 3 media_store_path: "/data/media" uploads_path: "/data/uploads" -max_upload_size: "10M" +max_upload_size: "{{ SYNAPSE_MAX_UPLOAD_SIZE or "10M" }}" max_image_pixels: "32M" dynamic_thumbnails: false From f472abd792c5f3d969bb16c9ad7ee278b87ee761 Mon Sep 17 00:00:00 2001 From: Simon Dwyer Date: Sun, 16 Sep 2018 10:28:29 +1000 Subject: [PATCH 124/138] Added description for "SYNAPSE_MAX_UPLOAD_SIZE" variable. --- docker/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docker/README.md b/docker/README.md index 038c78f7c0..ecdf8a8b17 100644 --- a/docker/README.md +++ b/docker/README.md @@ -88,6 +88,7 @@ variables are available for configuration: * ``SYNAPSE_TURN_URIS``, set this variable to the coma-separated list of TURN uris to enable TURN for this homeserver. * ``SYNAPSE_TURN_SECRET``, set this to the TURN shared secret if required. +* ``SYNAPSE_MAX_UPLOAD_SIZE``, the max upload size [default `10M`]. Shared secrets, that will be initialized to random values if not set: From da864a92c9eb8335fdd5602bcd4afe21c98cca39 Mon Sep 17 00:00:00 2001 From: Simon Dwyer Date: Sun, 16 Sep 2018 12:59:23 +1000 Subject: [PATCH 125/138] Added description for "SYNAPSE_MAX_UPLOAD_SIZE" variable. 
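For reference, the SYNAPSE_MAX_UPLOAD_SIZE plumbing documented here relies on the Jinja "falsy or default" expression in the homeserver.yaml template added two patches earlier, with the conf templates being rendered when the container starts. A standalone rendering demo (the harness below is illustrative only, not part of the image):

    import os
    from jinja2 import Template

    line = 'max_upload_size: "{{ SYNAPSE_MAX_UPLOAD_SIZE or "10M" }}"'
    rendered = Template(line).render(
        SYNAPSE_MAX_UPLOAD_SIZE=os.environ.get("SYNAPSE_MAX_UPLOAD_SIZE")
    )
    print(rendered)  # -> max_upload_size: "10M" when the variable is unset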
--- docker/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/README.md b/docker/README.md index ecdf8a8b17..3c00d1e948 100644 --- a/docker/README.md +++ b/docker/README.md @@ -88,7 +88,7 @@ variables are available for configuration: * ``SYNAPSE_TURN_URIS``, set this variable to the coma-separated list of TURN uris to enable TURN for this homeserver. * ``SYNAPSE_TURN_SECRET``, set this to the TURN shared secret if required. -* ``SYNAPSE_MAX_UPLOAD_SIZE``, the max upload size [default `10M`]. +* ``SYNAPSE_MAX_UPLOAD_SIZE``, set this variable to change the max upload size [default `10M`]. Shared secrets, that will be initialized to random values if not set: From 0e46ff690473538477037d5dbd2200f6863fc68e Mon Sep 17 00:00:00 2001 From: Simon Dwyer Date: Sun, 16 Sep 2018 13:33:33 +1000 Subject: [PATCH 126/138] Adding the ability to change MAX_UPLOAD_SIZE for the docker container variables. Signed-off-by: Simon Dwyer --- changelog.d/3883.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3883.feature diff --git a/changelog.d/3883.feature b/changelog.d/3883.feature new file mode 100644 index 0000000000..c11e5c5309 --- /dev/null +++ b/changelog.d/3883.feature @@ -0,0 +1 @@ +Adding the ability to change MAX_UPLOAD_SIZE for the docker container variables. \ No newline at end of file From e7b3b4d8c22952d4f090da6623fefb6c1caf0070 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 17 Sep 2018 11:42:42 +0100 Subject: [PATCH 127/138] Remove nuke-room-from-db.sh script The problem with this script is that it is largely untested, entirely unmaintained, and running it is likely to make your synapse blow up in exciting ways. For example, it leaves a bunch of tables with dead values in it, like event_to_state_groups. Having it here sends a message that it is a supported part of synapse, which is absolutely not the case. --- scripts-dev/nuke-room-from-db.sh | 57 -------------------------------- 1 file changed, 57 deletions(-) delete mode 100755 scripts-dev/nuke-room-from-db.sh diff --git a/scripts-dev/nuke-room-from-db.sh b/scripts-dev/nuke-room-from-db.sh deleted file mode 100755 index c62928afdb..0000000000 --- a/scripts-dev/nuke-room-from-db.sh +++ /dev/null @@ -1,57 +0,0 @@ -#!/bin/bash - -## CAUTION: -## This script will remove (hopefully) all trace of the given room ID from -## your homeserver.db - -## Do not run it lightly. - -set -e - -if [ "$1" == "-h" ] || [ "$1" == "" ]; then - echo "Call with ROOM_ID as first option and then pipe it into the database. 
So for instance you might run" - echo " nuke-room-from-db.sh | sqlite3 homeserver.db" - echo "or" - echo " nuke-room-from-db.sh | psql --dbname=synapse" - exit -fi - -ROOMID="$1" - -cat < Date: Mon, 17 Sep 2018 11:51:38 +0100 Subject: [PATCH 128/138] changelog --- changelog.d/3888.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3888.misc diff --git a/changelog.d/3888.misc b/changelog.d/3888.misc new file mode 100644 index 0000000000..a10ede547e --- /dev/null +++ b/changelog.d/3888.misc @@ -0,0 +1 @@ +Remove unmaintained "nuke-room-from-db.sh" script From 85a43f4167b5e775f7c3afe96d71137818561272 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 17 Sep 2018 13:19:00 +0100 Subject: [PATCH 129/138] Return a 404 when deleting unknown room alias As per https://github.com/matrix-org/matrix-doc/issues/1675 Fixes https://github.com/matrix-org/synapse/issues/2782 --- synapse/handlers/directory.py | 19 ++++++++++++++++--- synapse/storage/directory.py | 1 - 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index ef866da1b6..c745e6740b 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -20,7 +20,14 @@ import string from twisted.internet import defer from synapse.api.constants import EventTypes -from synapse.api.errors import AuthError, CodeMessageException, Codes, SynapseError +from synapse.api.errors import ( + AuthError, + CodeMessageException, + Codes, + NotFoundError, + StoreError, + SynapseError, +) from synapse.types import RoomAlias, UserID, get_domain_from_id from ._base import BaseHandler @@ -109,7 +116,13 @@ class DirectoryHandler(BaseHandler): def delete_association(self, requester, user_id, room_alias): # association deletion for human users - can_delete = yield self._user_can_delete_alias(room_alias, user_id) + try: + can_delete = yield self._user_can_delete_alias(room_alias, user_id) + except StoreError as e: + if e.code == 404: + raise NotFoundError("Unknown room alias") + raise + if not can_delete: raise AuthError( 403, "You don't have permission to delete the alias.", @@ -320,7 +333,7 @@ class DirectoryHandler(BaseHandler): def _user_can_delete_alias(self, alias, user_id): creator = yield self.store.get_room_alias_creator(alias.to_string()) - if creator and creator == user_id: + if creator == user_id: defer.returnValue(True) is_admin = yield self.auth.is_server_admin(UserID.from_string(user_id)) diff --git a/synapse/storage/directory.py b/synapse/storage/directory.py index 808194236a..cfb687cb53 100644 --- a/synapse/storage/directory.py +++ b/synapse/storage/directory.py @@ -75,7 +75,6 @@ class DirectoryWorkerStore(SQLBaseStore): }, retcol="creator", desc="get_room_alias_creator", - allow_none=True ) @cached(max_entries=5000) From c1ae6b1bce61fecda91aa6ab8a51d152418cf79e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 17 Sep 2018 13:21:08 +0100 Subject: [PATCH 130/138] changelog --- changelog.d/3889.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3889.bugfix diff --git a/changelog.d/3889.bugfix b/changelog.d/3889.bugfix new file mode 100644 index 0000000000..e976425987 --- /dev/null +++ b/changelog.d/3889.bugfix @@ -0,0 +1 @@ +Fix 500 error when deleting unknown room alias From c9c50284d71669ad2496c2b0af30c392428a67e5 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 17 Sep 2018 15:58:47 +0100 Subject: [PATCH 131/138] README: run 
From f00a9d2636211e6762d8c8c627ebee3f6cea05c9 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Mon, 17 Sep 2018 16:15:42 +0100
Subject: [PATCH 132/138] Fix some b'abcd' noise in logs and metrics

Python 3 compatibility: make sure that we decode some byte sequences
before we use them to create log lines and metrics labels.
---
 synapse/http/matrixfederationclient.py | 5 ++++-
 synapse/http/site.py                   | 8 ++++----
 2 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index 13b19f7626..083484a687 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -244,7 +244,10 @@ class MatrixFederationHttpClient(object):
                     request_deferred,
                 )
 
-                log_result = "%d %s" % (response.code, response.phrase,)
+                log_result = "%d %s" % (
+                    response.code,
+                    response.phrase.decode('ascii', errors='replace'),
+                )
                 break
             except Exception as e:
                 if not retry_on_dns_fail and isinstance(e, DNSLookupError):
diff --git a/synapse/http/site.py b/synapse/http/site.py
index e1e53e8ae5..2191de9836 100644
--- a/synapse/http/site.py
+++ b/synapse/http/site.py
@@ -82,7 +82,7 @@ class SynapseRequest(Request):
         )
 
     def get_request_id(self):
-        return "%s-%i" % (self.method, self.request_seq)
+        return "%s-%i" % (self.method.decode('ascii'), self.request_seq)
 
     def get_redacted_uri(self):
         uri = self.uri
@@ -119,7 +119,7 @@ class SynapseRequest(Request):
         # dispatching to the handler, so that the handler
         # can update the servlet name in the request
         # metrics
-        requests_counter.labels(self.method,
+        requests_counter.labels(self.method.decode('ascii'),
                                 self.request_metrics.name).inc()
 
     @contextlib.contextmanager
@@ -280,9 +280,9 @@ class SynapseRequest(Request):
             int(usage.db_txn_count),
             self.sentLength,
             code,
-            self.method,
+            self.method.decode('ascii'),
             self.get_redacted_uri(),
-            self.clientproto,
+            self.clientproto.decode('ascii', errors='replace'),
             user_agent,
             usage.evt_db_fetch_count,
         )

From 0cb7afff35dacb2b43d695536336a403b14d4b62 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Mon, 17 Sep 2018 16:17:25 +0100
Subject: [PATCH 133/138] changelog

---
 changelog.d/3892.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/3892.bugfix

diff --git a/changelog.d/3892.bugfix b/changelog.d/3892.bugfix
new file mode 100644
index 0000000000..8b30afab04
--- /dev/null
+++ b/changelog.d/3892.bugfix
@@ -0,0 +1 @@
+Fix some b'abcd' noise in logs and metrics
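The underlying issue generalises beyond these call sites: on Python 3, twisted hands back the request method, URI, protocol and response phrase as `bytes`, and interpolating bytes into a `"%s"` format yields the `b'...'` repr. A small self-contained sketch of the before/after behaviour (the values are made up):

    method = b"GET"          # what twisted exposes on Python 3
    phrase = b"OK"

    print("%s %s" % (method, phrase))
    # -> b'GET' b'OK'        (the noise this patch removes)

    print("%s %s" % (method.decode('ascii'),
                     phrase.decode('ascii', errors='replace')))
    # -> GET OK              (errors='replace' guards against junk bytes)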
b/synapse/handlers/directory.py
@@ -333,7 +333,7 @@ class DirectoryHandler(BaseHandler):
     def _user_can_delete_alias(self, alias, user_id):
         creator = yield self.store.get_room_alias_creator(alias.to_string())
 
-        if creator == user_id:
+        if creator is not None and creator == user_id:
             defer.returnValue(True)
 
         is_admin = yield self.auth.is_server_admin(UserID.from_string(user_id))

From ac80cb08fe247812b46c70843b2d5c9764cca619 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Mon, 17 Sep 2018 17:16:50 +0100
Subject: [PATCH 135/138] Fix more b'abcd' noise in metrics

---
 synapse/http/request_metrics.py | 22 +++++++++++-----------
 synapse/http/site.py            |  2 +-
 2 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py
index 72c2654678..fedb4e6b18 100644
--- a/synapse/http/request_metrics.py
+++ b/synapse/http/request_metrics.py
@@ -162,7 +162,7 @@ class RequestMetrics(object):
         with _in_flight_requests_lock:
             _in_flight_requests.add(self)
 
-    def stop(self, time_sec, request):
+    def stop(self, time_sec, response_code, sent_bytes):
         with _in_flight_requests_lock:
             _in_flight_requests.discard(self)
 
@@ -179,35 +179,35 @@ class RequestMetrics(object):
                 )
                 return
 
-        response_code = str(request.code)
+        response_code = str(response_code)
 
-        outgoing_responses_counter.labels(request.method, response_code).inc()
+        outgoing_responses_counter.labels(self.method, response_code).inc()
 
-        response_count.labels(request.method, self.name, tag).inc()
+        response_count.labels(self.method, self.name, tag).inc()
 
-        response_timer.labels(request.method, self.name, tag, response_code).observe(
+        response_timer.labels(self.method, self.name, tag, response_code).observe(
             time_sec - self.start
         )
 
         resource_usage = context.get_resource_usage()
 
-        response_ru_utime.labels(request.method, self.name, tag).inc(
+        response_ru_utime.labels(self.method, self.name, tag).inc(
             resource_usage.ru_utime,
         )
-        response_ru_stime.labels(request.method, self.name, tag).inc(
+        response_ru_stime.labels(self.method, self.name, tag).inc(
             resource_usage.ru_stime,
         )
-        response_db_txn_count.labels(request.method, self.name, tag).inc(
+        response_db_txn_count.labels(self.method, self.name, tag).inc(
             resource_usage.db_txn_count
         )
-        response_db_txn_duration.labels(request.method, self.name, tag).inc(
+        response_db_txn_duration.labels(self.method, self.name, tag).inc(
             resource_usage.db_txn_duration_sec
         )
-        response_db_sched_duration.labels(request.method, self.name, tag).inc(
+        response_db_sched_duration.labels(self.method, self.name, tag).inc(
             resource_usage.db_sched_duration_sec
         )
 
-        response_size.labels(request.method, self.name, tag).inc(request.sentLength)
+        response_size.labels(self.method, self.name, tag).inc(sent_bytes)
 
         # We always call this at the end to ensure that we update the metrics
         # regardless of whether a call to /metrics while the request was in
diff --git a/synapse/http/site.py b/synapse/http/site.py
index 2191de9836..9579e8cd0d 100644
--- a/synapse/http/site.py
+++ b/synapse/http/site.py
@@ -288,7 +288,7 @@ class SynapseRequest(Request):
         )
 
         try:
-            self.request_metrics.stop(self.finish_time, self)
+            self.request_metrics.stop(self.finish_time, self.code, self.sentLength)
         except Exception as e:
             logger.warn("Failed to stop metrics: %r", e)

From 06f2dbbb5dbe7cf9efe66376f47008e86a180f3b Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Mon, 17 Sep 2018 17:18:26 +0100
Subject: [PATCH 136/138] changelog

---
 changelog.d/3895.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/3895.bugfix

diff --git a/changelog.d/3895.bugfix b/changelog.d/3895.bugfix
new file mode 100644
index 0000000000..8b30afab04
--- /dev/null
+++ b/changelog.d/3895.bugfix
@@ -0,0 +1 @@
+Fix some b'abcd' noise in logs and metrics
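Beyond the decoding fix, the new `stop(time_sec, response_code, sent_bytes)` signature means the metrics object no longer reaches into the half-str, half-bytes `Request` at all — it is handed plain values, which also makes the code easy to exercise in isolation. A rough standalone sketch of the shape (this is an illustrative stand-in, not Synapse's actual class):

    class FakeRequestMetrics(object):
        """Illustrative stand-in for RequestMetrics."""

        def __init__(self, method):
            # stored once, as a decoded str, when the request starts
            self.method = method

        def stop(self, time_sec, response_code, sent_bytes):
            # every label here is a plain str/int -- no b'GET' can leak in
            print("method=%s code=%s sent=%d finished_at=%.3f"
                  % (self.method, response_code, sent_bytes, time_sec))

    m = FakeRequestMetrics(b"GET".decode('ascii'))
    m.stop(1537201045.123, 200, 4096)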
From 4ecdf73fc7a9e1b8357c6f74897b33b32243dd3f Mon Sep 17 00:00:00 2001
From: Aaron Raimist
Date: Mon, 17 Sep 2018 22:05:23 -0500
Subject: [PATCH 137/138] Fix typo in README, synaspse -> synapse

Signed-off-by: Aaron Raimist
---
 README.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.rst b/README.rst
index 0147429aed..9c0f9c09c8 100644
--- a/README.rst
+++ b/README.rst
@@ -968,7 +968,7 @@ improvement in overall amount, and especially in terms of giving back RAM
 to the OS. To use it, the library must simply be put in the LD_PRELOAD
 environment variable when launching Synapse. On Debian, this can be done
 by installing the ``libjemalloc1`` package and adding this line to
-``/etc/default/matrix-synaspse``::
+``/etc/default/matrix-synapse``::
 
     LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1

From 505abb38f04b8b18adb0b5e5a150238f76b3e4fe Mon Sep 17 00:00:00 2001
From: Aaron Raimist
Date: Mon, 17 Sep 2018 22:07:19 -0500
Subject: [PATCH 138/138] Add changelog

Signed-off-by: Aaron Raimist
---
 changelog.d/3897.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/3897.misc

diff --git a/changelog.d/3897.misc b/changelog.d/3897.misc
new file mode 100644
index 0000000000..87e7ac796e
--- /dev/null
+++ b/changelog.d/3897.misc
@@ -0,0 +1 @@
+Fix typo in README, synaspse -> synapse
\ No newline at end of file
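As a closing aside on the jemalloc section touched above: once the LD_PRELOAD line is in the (now correctly spelled) ``/etc/default/matrix-synapse`` and the service has been restarted, one quick way to confirm the library was actually preloaded is to look for it in the process's memory maps — the pid-file path here is an assumption and depends on how synapse was started:

    grep jemalloc /proc/$(cat /var/run/matrix-synapse.pid)/maps

Any output referencing libjemalloc.so.1 means the allocator is live in the running process.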