From 923d9300ede819aa45da546fafc240f40263e7c5 Mon Sep 17 00:00:00 2001
From: Travis Ralston
Date: Sat, 17 Feb 2018 21:53:46 -0700
Subject: [PATCH 01/96] Add a blurb explaining the main synapse worker

Signed-off-by: Travis Ralston
---
 docs/workers.rst | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/docs/workers.rst b/docs/workers.rst
index dee04bbf3e..a5e084c22a 100644
--- a/docs/workers.rst
+++ b/docs/workers.rst
@@ -115,6 +115,18 @@ To manipulate a specific worker, you pass the -w option to synctl::

 synctl -w $CONFIG/workers/synchrotron.yaml restart

+After setting up your workers, you'll need to create a worker configuration for
+the main synapse process. That worker configuration should look like this::
+
+ worker_app: synapse.app.homeserver
+ daemonize: true
+
+Be sure to keep this particular configuration limited as synapse may refuse to
+start if the regular ``worker_*`` options are given. The ``homeserver.yaml``
+configuration will be used to set up the main synapse process.
+
+**You must have a worker configuration for the main synapse process!**
+
 Available worker applications
 -----------------------------

From 47ce527f459e0a28a45a2299db799ea18d632021 Mon Sep 17 00:00:00 2001
From: Jonas Platte
Date: Tue, 13 Mar 2018 14:10:07 +0100
Subject: [PATCH 02/96] Add room_id to the response of `rooms/{roomId}/join`

Fixes #2349
---
 synapse/rest/client/v1/room.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index f8999d64d7..6dc31bf9ae 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -655,7 +655,12 @@ class RoomMembershipRestServlet(ClientV1RestServlet):
 content=event_content,
 )

- defer.returnValue((200, {}))
+ return_value = {}
+
+ if membership_action == "join":
+ return_value["room_id"] = room_id
+
+ defer.returnValue((200, return_value))

 def _has_3pid_invite_keys(self, content):
 for key in {"id_server", "medium", "address"}:

From 2cc9f76bc3cfa012dcdfe614bdda7e689b8b5e65 Mon Sep 17 00:00:00 2001
From: NotAFile
Date: Thu, 15 Mar 2018 16:11:17 +0100
Subject: [PATCH 03/96] replace old style error catching with 'as' keyword

This is both easier to read and compatible with python3 (not that that
matters)

Signed-off-by: Adrian Tschira
---
 synapse/app/synctl.py | 4 ++--
 synapse/handlers/device.py | 6 +++---
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/synapse/app/synctl.py b/synapse/app/synctl.py
index 0f0ddfa78a..b0e1b5e66a 100755
--- a/synapse/app/synctl.py
+++ b/synapse/app/synctl.py
@@ -38,7 +38,7 @@ def pid_running(pid):
 try:
 os.kill(pid, 0)
 return True
- except OSError, err:
+ except OSError as err:
 if err.errno == errno.EPERM:
 return True
 return False
@@ -98,7 +98,7 @@ def stop(pidfile, app):
 try:
 os.kill(pid, signal.SIGTERM)
 write("stopped %s" % (app,), colour=GREEN)
- except OSError, err:
+ except OSError as err:
 if err.errno == errno.ESRCH:
 write("%s not running" % (app,), colour=YELLOW)
 elif err.errno == errno.EPERM:
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index 40f3d24678..f7457a7082 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -155,7 +155,7 @@ class DeviceHandler(BaseHandler):
 try:
 yield self.store.delete_device(user_id, device_id)
- except errors.StoreError, e:
+ except errors.StoreError as e:
 if e.code == 404:
 # no match
 pass
@@ -204,7 +204,7 @@ class DeviceHandler(BaseHandler):
 try:
 yield self.store.delete_devices(user_id, device_ids)
- except errors.StoreError, e:
+ except errors.StoreError as e: if e.code == 404: # no match pass @@ -243,7 +243,7 @@ class DeviceHandler(BaseHandler): new_display_name=content.get("display_name") ) yield self.notify_device_update(user_id, [device_id]) - except errors.StoreError, e: + except errors.StoreError as e: if e.code == 404: raise errors.NotFoundError() else: From 9cf519769b70311d06b43a97e47cfb5a06e6ade4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 15 Mar 2018 17:48:42 +0000 Subject: [PATCH 04/96] Use .iter* to avoid copies in StateHandler --- synapse/state.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/synapse/state.py b/synapse/state.py index 932f602508..18386556ae 100644 --- a/synapse/state.py +++ b/synapse/state.py @@ -132,7 +132,7 @@ class StateHandler(object): state_map = yield self.store.get_events(state.values(), get_prev_content=False) state = { - key: state_map[e_id] for key, e_id in state.items() if e_id in state_map + key: state_map[e_id] for key, e_id in state.iteritems() if e_id in state_map } defer.returnValue(state) @@ -378,7 +378,7 @@ class StateHandler(object): new_state = resolve_events_with_state_map(state_set_ids, state_map) new_state = { - key: state_map[ev_id] for key, ev_id in new_state.items() + key: state_map[ev_id] for key, ev_id in new_state.iteritems() } return new_state @@ -458,15 +458,15 @@ class StateResolutionHandler(object): # build a map from state key to the event_ids which set that state. # dict[(str, str), set[str]) state = {} - for st in state_groups_ids.values(): - for key, e_id in st.items(): + for st in state_groups_ids.itervalues(): + for key, e_id in st.iteritems(): state.setdefault(key, set()).add(e_id) # build a map from state key to the event_ids which set that state, # including only those where there are state keys in conflict. conflicted_state = { k: list(v) - for k, v in state.items() + for k, v in state.iteritems() if len(v) > 1 } @@ -480,7 +480,7 @@ class StateResolutionHandler(object): ) else: new_state = { - key: e_ids.pop() for key, e_ids in state.items() + key: e_ids.pop() for key, e_ids in state.iteritems() } # if the new state matches any of the input state groups, we can @@ -488,8 +488,8 @@ class StateResolutionHandler(object): # which will be used as a cache key for future resolutions, but # not get persisted. 
 state_group = None
- new_state_event_ids = frozenset(new_state.values())
- for sg, events in state_groups_ids.items():
+ new_state_event_ids = frozenset(new_state.iteritems())
+ for sg, events in state_groups_ids.iteritems():
 if new_state_event_ids == frozenset(e_id for e_id in events):
 state_group = sg
 break
@@ -702,7 +702,7 @@ def _resolve_with_state(unconflicted_state_ids, conflicted_state_ds, auth_event_
 auth_events = {
 key: state_map[ev_id]
- for key, ev_id in auth_event_ids.items()
+ for key, ev_id in auth_event_ids.iteritems()
 if ev_id in state_map
 }
@@ -740,7 +740,7 @@ def _resolve_state_events(conflicted_state, auth_events):
 auth_events.update(resolved_state)

- for key, events in conflicted_state.items():
+ for key, events in conflicted_state.iteritems():
 if key[0] == EventTypes.JoinRules:
 logger.debug("Resolving conflicted join rules %r", events)
 resolved_state[key] = _resolve_auth_events(
@@ -750,7 +750,7 @@ def _resolve_state_events(conflicted_state, auth_events):
 auth_events.update(resolved_state)

- for key, events in conflicted_state.items():
+ for key, events in conflicted_state.iteritems():
 if key[0] == EventTypes.Member:
 logger.debug("Resolving conflicted member lists %r", events)
 resolved_state[key] = _resolve_auth_events(
@@ -760,7 +760,7 @@ def _resolve_state_events(conflicted_state, auth_events):
 auth_events.update(resolved_state)

- for key, events in conflicted_state.items():
+ for key, events in conflicted_state.iteritems():
 if key not in resolved_state:
 logger.debug("Resolving conflicted state %r:%r", key, events)
 resolved_state[key] = _resolve_normal_events(

From 1c41b05c8c98f0b9157c791b5b8ebf5f9fe85acf Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 21 Mar 2018 17:46:26 +0000
Subject: [PATCH 05/96] Add Cache-Control headers to all JSON APIs

It is especially important that sync requests don't get cached, as if a
sync returns the same token given then the client will call sync with the
same parameters again. If the previous response was cached it will get
reused, resulting in the client tight-looping making the same request and
never making any progress.

In general, clients will expect to get up to date data when requesting
APIs, and so it's safer to do a blanket no-cache policy than only
whitelisting APIs that we know will break things if they get cached.
---
 synapse/http/server.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/synapse/http/server.py b/synapse/http/server.py
index 1551db239d..f19c068ef6 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -488,6 +488,7 @@ def respond_with_json_bytes(request, code, json_bytes, send_cors=False,
 request.setHeader(b"Content-Type", b"application/json")
 request.setHeader(b"Server", version_string)
 request.setHeader(b"Content-Length", b"%d" % (len(json_bytes),))
+ request.setHeader(b"Cache-Control", b"no-cache, no-store, must-revalidate")

 if send_cors:
 set_cors_headers(request)

From fde8e8f09fb0e62d4670e33b75319b5bd57f484f Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 22 Mar 2018 11:42:16 +0000
Subject: [PATCH 06/96] Fix s/iteritems/itervalues

---
 synapse/state.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/synapse/state.py b/synapse/state.py
index 18386556ae..a7f20350f1 100644
--- a/synapse/state.py
+++ b/synapse/state.py
@@ -488,7 +488,7 @@ class StateResolutionHandler(object):
 # which will be used as a cache key for future resolutions, but
 # not get persisted.
state_group = None - new_state_event_ids = frozenset(new_state.iteritems()) + new_state_event_ids = frozenset(new_state.itervalues()) for sg, events in state_groups_ids.iteritems(): if new_state_event_ids == frozenset(e_id for e_id in events): state_group = sg From 8cbbfaefc1fc597cbbef52a10dbfb8ecd4d8a8cd Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Fri, 23 Mar 2018 10:32:50 +0000 Subject: [PATCH 07/96] 404 correctly on missing paths via NoResource fixes https://github.com/matrix-org/synapse/issues/2043 and https://github.com/matrix-org/synapse/issues/2029 --- synapse/app/appservice.py | 4 ++-- synapse/app/client_reader.py | 4 ++-- synapse/app/event_creator.py | 4 ++-- synapse/app/federation_reader.py | 4 ++-- synapse/app/federation_sender.py | 4 ++-- synapse/app/frontend_proxy.py | 4 ++-- synapse/app/homeserver.py | 4 ++-- synapse/app/media_repository.py | 4 ++-- synapse/app/pusher.py | 4 ++-- synapse/app/synchrotron.py | 4 ++-- synapse/app/user_dir.py | 4 ++-- synapse/util/httpresourcetree.py | 4 ++-- 12 files changed, 24 insertions(+), 24 deletions(-) diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py index c6fe4516d1..f2540023a7 100644 --- a/synapse/app/appservice.py +++ b/synapse/app/appservice.py @@ -36,7 +36,7 @@ from synapse.util.logcontext import LoggingContext, preserve_fn from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string from twisted.internet import reactor -from twisted.web.resource import Resource +from twisted.web.resource import NoResource logger = logging.getLogger("synapse.app.appservice") @@ -64,7 +64,7 @@ class AppserviceServer(HomeServer): if name == "metrics": resources[METRICS_PREFIX] = MetricsResource(self) - root_resource = create_resource_tree(resources, Resource()) + root_resource = create_resource_tree(resources, NoResource()) _base.listen_tcp( bind_addresses, diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index 0a8ce9bc66..267d34c881 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -44,7 +44,7 @@ from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string from twisted.internet import reactor -from twisted.web.resource import Resource +from twisted.web.resource import NoResource logger = logging.getLogger("synapse.app.client_reader") @@ -88,7 +88,7 @@ class ClientReaderServer(HomeServer): "/_matrix/client/api/v1": resource, }) - root_resource = create_resource_tree(resources, Resource()) + root_resource = create_resource_tree(resources, NoResource()) _base.listen_tcp( bind_addresses, diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py index 172e989b54..b915d12d53 100644 --- a/synapse/app/event_creator.py +++ b/synapse/app/event_creator.py @@ -52,7 +52,7 @@ from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string from twisted.internet import reactor -from twisted.web.resource import Resource +from twisted.web.resource import NoResource logger = logging.getLogger("synapse.app.event_creator") @@ -104,7 +104,7 @@ class EventCreatorServer(HomeServer): "/_matrix/client/api/v1": resource, }) - root_resource = create_resource_tree(resources, Resource()) + root_resource = create_resource_tree(resources, NoResource()) _base.listen_tcp( bind_addresses, diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index 
20d157911b..c1dc66dd17 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ -41,7 +41,7 @@ from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string from twisted.internet import reactor -from twisted.web.resource import Resource +from twisted.web.resource import NoResource logger = logging.getLogger("synapse.app.federation_reader") @@ -77,7 +77,7 @@ class FederationReaderServer(HomeServer): FEDERATION_PREFIX: TransportLayerServer(self), }) - root_resource = create_resource_tree(resources, Resource()) + root_resource = create_resource_tree(resources, NoResource()) _base.listen_tcp( bind_addresses, diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index f760826d27..0cc3331519 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -42,7 +42,7 @@ from synapse.util.logcontext import LoggingContext, preserve_fn from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string from twisted.internet import defer, reactor -from twisted.web.resource import Resource +from twisted.web.resource import NoResource logger = logging.getLogger("synapse.app.federation_sender") @@ -91,7 +91,7 @@ class FederationSenderServer(HomeServer): if name == "metrics": resources[METRICS_PREFIX] = MetricsResource(self) - root_resource = create_resource_tree(resources, Resource()) + root_resource = create_resource_tree(resources, NoResource()) _base.listen_tcp( bind_addresses, diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index 816c080d18..de889357c3 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -44,7 +44,7 @@ from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string from twisted.internet import defer, reactor -from twisted.web.resource import Resource +from twisted.web.resource import NoResource logger = logging.getLogger("synapse.app.frontend_proxy") @@ -142,7 +142,7 @@ class FrontendProxyServer(HomeServer): "/_matrix/client/api/v1": resource, }) - root_resource = create_resource_tree(resources, Resource()) + root_resource = create_resource_tree(resources, NoResource()) _base.listen_tcp( bind_addresses, diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index e477c7ced6..c00afbba28 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -56,7 +56,7 @@ from synapse.util.rlimit import change_resource_limit from synapse.util.versionstring import get_version_string from twisted.application import service from twisted.internet import defer, reactor -from twisted.web.resource import EncodingResourceWrapper, Resource +from twisted.web.resource import EncodingResourceWrapper, NoResource from twisted.web.server import GzipEncoderFactory from twisted.web.static import File @@ -126,7 +126,7 @@ class SynapseHomeServer(HomeServer): if WEB_CLIENT_PREFIX in resources: root_resource = RootRedirect(WEB_CLIENT_PREFIX) else: - root_resource = Resource() + root_resource = NoResource() root_resource = create_resource_tree(resources, root_resource) diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py index 84c5791b3b..fc8282bbc1 100644 --- a/synapse/app/media_repository.py +++ b/synapse/app/media_repository.py @@ -43,7 +43,7 @@ from synapse.util.logcontext import LoggingContext from synapse.util.manhole 
import manhole from synapse.util.versionstring import get_version_string from twisted.internet import reactor -from twisted.web.resource import Resource +from twisted.web.resource import NoResource logger = logging.getLogger("synapse.app.media_repository") @@ -84,7 +84,7 @@ class MediaRepositoryServer(HomeServer): ), }) - root_resource = create_resource_tree(resources, Resource()) + root_resource = create_resource_tree(resources, NoResource()) _base.listen_tcp( bind_addresses, diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index 98a4a7c62c..d5c3a85195 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -37,7 +37,7 @@ from synapse.util.logcontext import LoggingContext, preserve_fn from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string from twisted.internet import defer, reactor -from twisted.web.resource import Resource +from twisted.web.resource import NoResource logger = logging.getLogger("synapse.app.pusher") @@ -94,7 +94,7 @@ class PusherServer(HomeServer): if name == "metrics": resources[METRICS_PREFIX] = MetricsResource(self) - root_resource = create_resource_tree(resources, Resource()) + root_resource = create_resource_tree(resources, NoResource()) _base.listen_tcp( bind_addresses, diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index abe91dcfbd..508b66613d 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -56,7 +56,7 @@ from synapse.util.manhole import manhole from synapse.util.stringutils import random_string from synapse.util.versionstring import get_version_string from twisted.internet import defer, reactor -from twisted.web.resource import Resource +from twisted.web.resource import NoResource logger = logging.getLogger("synapse.app.synchrotron") @@ -269,7 +269,7 @@ class SynchrotronServer(HomeServer): "/_matrix/client/api/v1": resource, }) - root_resource = create_resource_tree(resources, Resource()) + root_resource = create_resource_tree(resources, NoResource()) _base.listen_tcp( bind_addresses, diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py index 494ccb702c..5f845e80d1 100644 --- a/synapse/app/user_dir.py +++ b/synapse/app/user_dir.py @@ -43,7 +43,7 @@ from synapse.util.logcontext import LoggingContext, preserve_fn from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string from twisted.internet import reactor -from twisted.web.resource import Resource +from twisted.web.resource import NoResource logger = logging.getLogger("synapse.app.user_dir") @@ -116,7 +116,7 @@ class UserDirectoryServer(HomeServer): "/_matrix/client/api/v1": resource, }) - root_resource = create_resource_tree(resources, Resource()) + root_resource = create_resource_tree(resources, NoResource()) _base.listen_tcp( bind_addresses, diff --git a/synapse/util/httpresourcetree.py b/synapse/util/httpresourcetree.py index 45be47159a..d747849553 100644 --- a/synapse/util/httpresourcetree.py +++ b/synapse/util/httpresourcetree.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from twisted.web.resource import Resource +from twisted.web.resource import NoResource import logging @@ -45,7 +45,7 @@ def create_resource_tree(desired_tree, root_resource): for path_seg in full_path.split('/')[1:-1]: if path_seg not in last_resource.listNames(): # resource doesn't exist, so make a "dummy resource" - child_resource = Resource() + child_resource = NoResource() last_resource.putChild(path_seg, child_resource) res_id = _resource_id(last_resource, path_seg) resource_mappings[res_id] = child_resource From 8efe773ef1dc09c295fe494befbef568257095db Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Fri, 23 Mar 2018 21:38:42 +0000 Subject: [PATCH 08/96] fix typo --- CHANGES.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index bac78d3c1c..00c8a2b77d 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -39,7 +39,7 @@ Features: Changes: -* Continue to factor out processing from main process and into worker processes. See updated `docs/workers.rst `_ (PR #2892 - #2904, #2913, #2920 - #2926, #2947, #2847, #2854, #2872, #2873, #2874, #2928, #2929, #2934, #2856, #2976 - #2984, #2987 - #2989, #2991 - #2993, #2995, #2784) +* Continue to factor out processing from main process and into worker processes. See updated `docs/workers.rst `_ (PR #2892 - #2904, #2913, #2920 - #2926, #2947, #2847, #2854, #2872, #2873, #2874, #2928, #2929, #2934, #2856, #2976 - #2984, #2987 - #2989, #2991 - #2993, #2995, #2784) * Ensure state cache is used when persisting events (PR #2864, #2871, #2802, #2835, #2836, #2841, #2842, #2849) * Change the default config to bind on both IPv4 and IPv6 on all platforms (PR #2435) Thanks to @silkeh! * No longer require a specific version of saml2 (PR #2695) Thanks to @okurz! From a052aa42e7d500f32b034bbbf1e8ad11fed02aa9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 26 Mar 2018 12:02:20 +0100 Subject: [PATCH 09/96] Linearize calls to _generate_user_id --- synapse/handlers/register.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index ed5939880a..d07a8801de 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -24,7 +24,7 @@ from synapse.api.errors import ( from synapse.http.client import CaptchaServerHttpClient from synapse import types from synapse.types import UserID -from synapse.util.async import run_on_reactor +from synapse.util.async import run_on_reactor, Linearizer from synapse.util.threepids import check_3pid_allowed from ._base import BaseHandler @@ -46,6 +46,8 @@ class RegistrationHandler(BaseHandler): self.macaroon_gen = hs.get_macaroon_generator() + self._generate_user_id_linearizer = Linearizer(name="_generate_user_id_linearizer") + @defer.inlineCallbacks def check_username(self, localpart, guest_access_token=None, assigned_user_id=None): @@ -345,9 +347,10 @@ class RegistrationHandler(BaseHandler): @defer.inlineCallbacks def _generate_user_id(self, reseed=False): if reseed or self._next_generated_user_id is None: - self._next_generated_user_id = ( - yield self.store.find_next_generated_user_id_localpart() - ) + with (yield self._generate_user_id_linearizer.queue(())): + self._next_generated_user_id = ( + yield self.store.find_next_generated_user_id_localpart() + ) id = self._next_generated_user_id self._next_generated_user_id += 1 From 8d6dc106d188308f687db1f696cad7ad1b9ae83d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 26 Mar 2018 12:02:44 +0100 Subject: [PATCH 10/96] Don't use 
_cursor_to_dict in find_next_generated_user_id_localpart --- synapse/storage/registration.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index d809b2ba46..6b557ca0cf 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -460,14 +460,12 @@ class RegistrationStore(RegistrationWorkerStore, """ def _find_next_generated_user_id(txn): txn.execute("SELECT name FROM users") - rows = self.cursor_to_dict(txn) regex = re.compile("^@(\d+):") found = set() - for r in rows: - user_id = r["name"] + for user_id, in txn: match = regex.search(user_id) if match: found.add(int(match.group(1))) From 44cd6e135806387552bab5cf7e8be256d53632b6 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 26 Mar 2018 12:06:48 +0100 Subject: [PATCH 11/96] PEP8 --- synapse/handlers/register.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index d07a8801de..f1bf81ed7d 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -46,7 +46,9 @@ class RegistrationHandler(BaseHandler): self.macaroon_gen = hs.get_macaroon_generator() - self._generate_user_id_linearizer = Linearizer(name="_generate_user_id_linearizer") + self._generate_user_id_linearizer = Linearizer( + name="_generate_user_id_linearizer", + ) @defer.inlineCallbacks def check_username(self, localpart, guest_access_token=None, From fecb45e0c38912f72655d53faa953e7713511e7f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 26 Mar 2018 13:31:49 +0100 Subject: [PATCH 12/96] Remove last usage of ujson --- synapse/python_dependencies.py | 1 - synapse/storage/group_server.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 91179ce532..40eedb63cb 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -34,7 +34,6 @@ REQUIREMENTS = { "bcrypt": ["bcrypt>=3.1.0"], "pillow": ["PIL"], "pydenticon": ["pydenticon"], - "ujson": ["ujson"], "blist": ["blist"], "pysaml2>=3.0.0": ["saml2>=3.0.0"], "pymacaroons-pynacl": ["pymacaroons"], diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 8fde1aab8e..d03858234b 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -19,7 +19,7 @@ from synapse.api.errors import SynapseError from ._base import SQLBaseStore -import ujson as json +import simplejson as json # The category ID for the "default" category. We don't store as null in the From 3f49e131d9a31d5bf198f2aa6b102562b2e747d0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 27 Mar 2018 10:30:03 +0100 Subject: [PATCH 13/96] Add counter metrics for calculating state delta This will allow us to measure how often we calculate state deltas in event persistence that we would have been able to calculate at the same time we calculated the state for the event. 
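
The check described here is simple to state on its own: when this server
creates an event it uses the current forward extremities as that event's
prev_events, so if an event being persisted lists exactly the current
extremities as its prev_events, the delta computed at creation time would
still be valid now. A minimal standalone sketch of that counting logic (the
Counter class and the bare prev_events lists are simplified stand-ins for
Synapse's metrics objects and event structures, not the real ones)::

    # Simplified stand-ins: a counter, and events represented only by their
    # prev_events (a list of (event_id, hash_dict) pairs, as in Synapse).
    class Counter(object):
        def __init__(self):
            self.count = 0

        def inc(self):
            self.count += 1

    state_delta_reuse_delta_counter = Counter()

    def count_reusable_deltas(events_prev_events, latest_event_ids):
        # If an event being persisted lists the current forward extremities
        # as its prev_events, the state delta computed when the event was
        # created could have been reused rather than recalculated now.
        latest_event_ids = set(latest_event_ids)
        for prev_events in events_prev_events:
            prev_event_ids = set(e for e, _ in prev_events)
            if latest_event_ids == prev_event_ids:
                state_delta_reuse_delta_counter.inc()
                break

    # One locally-created event built on the two current extremities:
    count_reusable_deltas(
        [[("$extrem1:hs", {}), ("$extrem2:hs", {})]],
        ["$extrem1:hs", "$extrem2:hs"],
    )
    assert state_delta_reuse_delta_counter.count == 1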
--- synapse/storage/events.py | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 85ce6bea1a..f3a362db20 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -52,6 +52,15 @@ persist_event_counter = metrics.register_counter("persisted_events") event_counter = metrics.register_counter( "persisted_events_sep", labels=["type", "origin_type", "origin_entity"] ) +state_delta_counter = metrics.register_counter( + "state_delta", +) +state_delta_single_event_counter = metrics.register_counter( + "state_delta_single_event", +) +state_delta_reuse_delta_counter = metrics.register_counter( + "state_delta_reuse_delta", +) def encode_json(json_object): @@ -368,7 +377,8 @@ class EventsStore(EventsWorkerStore): room_id, ev_ctx_rm, latest_event_ids ) - if new_latest_event_ids == set(latest_event_ids): + latest_event_ids = set(latest_event_ids) + if new_latest_event_ids == latest_event_ids: # No change in extremities, so no change in state continue @@ -389,6 +399,25 @@ class EventsStore(EventsWorkerStore): if all_single_prev_not_state: continue + state_delta_counter.inc() + if len(new_latest_event_ids) == 1: + state_delta_single_event_counter.inc() + + # This is a fairly handwavey check to see if we could + # have guessed what the delta would have been when + # processing one of these events. + # What we're interested in is if the latest extremities + # were the same when we created the event as they are + # now. We guess this by looking at the prev events and + # checking if they match up, as when this server creates + # a new event it will use the extremities as the prev + # events. + for ev, _ in ev_ctx_rm: + prev_event_ids = set(e for e, _ in ev.prev_events) + if latest_event_ids == prev_event_ids: + state_delta_reuse_delta_counter.inc() + break + logger.info( "Calculating state delta for room %s", room_id, ) From 3e0c0660b31827f9d13c1fb5153a77e70813d5d3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 27 Mar 2018 13:01:34 +0100 Subject: [PATCH 14/96] Also do check inside linearizer --- synapse/handlers/register.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index f1bf81ed7d..dd03705279 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -350,9 +350,10 @@ class RegistrationHandler(BaseHandler): def _generate_user_id(self, reseed=False): if reseed or self._next_generated_user_id is None: with (yield self._generate_user_id_linearizer.queue(())): - self._next_generated_user_id = ( - yield self.store.find_next_generated_user_id_localpart() - ) + if reseed or self._next_generated_user_id is None: + self._next_generated_user_id = ( + yield self.store.find_next_generated_user_id_localpart() + ) id = self._next_generated_user_id self._next_generated_user_id += 1 From e70287cff3adeeeff2a7a1e95fc5845feb365710 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 27 Mar 2018 13:13:38 +0100 Subject: [PATCH 15/96] Comment --- synapse/storage/events.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index f3a362db20..ee8b3c46d3 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -52,12 +52,19 @@ persist_event_counter = metrics.register_counter("persisted_events") event_counter = metrics.register_counter( "persisted_events_sep", labels=["type", "origin_type", "origin_entity"] ) + +# The 
number of times we are recalculating the current state
 state_delta_counter = metrics.register_counter(
 "state_delta",
 )
+# The number of times we are recalculating state when there is only a
+# single forward extremity
 state_delta_single_event_counter = metrics.register_counter(
 "state_delta_single_event",
 )
+# The number of times we are recalculating state when we could have reasonably
+# calculated the delta when we calculated the state for an event we were
+# persisting.
 state_delta_reuse_delta_counter = metrics.register_counter(
 "state_delta_reuse_delta",
 )

From 152c2ac19e6af373a9e79a8f4da80d9e77a5396f Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 27 Mar 2018 13:13:46 +0100
Subject: [PATCH 16/96] Fix indent

---
 synapse/storage/events.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index ee8b3c46d3..95b36d669b 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -423,7 +423,7 @@ class EventsStore(EventsWorkerStore):
 prev_event_ids = set(e for e, _ in ev.prev_events)
 if latest_event_ids == prev_event_ids:
 state_delta_reuse_delta_counter.inc()
- break
+ break

From 800cfd5774d9726a00c53f2d84ce0286ab64c17b Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 27 Mar 2018 13:30:39 +0100
Subject: [PATCH 17/96] Comment

---
 synapse/storage/events.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 95b36d669b..f3d65f4338 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -415,10 +415,11 @@ class EventsStore(EventsWorkerStore):
 # processing one of these events.
 # What we're interested in is if the latest extremities
 # were the same when we created the event as they are
- # now. We guess this by looking at the prev events and
- # checking if they match up, as when this server creates
- # a new event it will use the extremities as the prev
- # events.
+ # now. When this server creates a new event (as opposed
+ # to receiving it over federation) it will use the
+ # forward extremities as the prev_events, so we can
+ # guess this by looking at the prev_events and checking
+ # if they match the current forward extremities.
 for ev, _ in ev_ctx_rm:
 prev_event_ids = set(e for e, _ in ev.prev_events)
 if latest_event_ids == prev_event_ids:

From c2a5cf2fe32d2cd582711669b7c0ce74682e1c05 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Tue, 27 Mar 2018 17:07:31 +0100
Subject: [PATCH 18/96] factor out exception handling for keys/claim and
 keys/query

this stuff is badly c&p'ed
---
 synapse/handlers/e2e_keys.py | 53 +++++++++++++++++-------------------
 1 file changed, 25 insertions(+), 28 deletions(-)

diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index 80b359b2e7..41521e6990 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -134,23 +135,8 @@ class E2eKeysHandler(object): if user_id in destination_query: results[user_id] = keys - except CodeMessageException as e: - failures[destination] = { - "status": e.code, "message": e.message - } - except NotRetryingDestination as e: - failures[destination] = { - "status": 503, "message": "Not ready for retry", - } - except FederationDeniedError as e: - failures[destination] = { - "status": 403, "message": "Federation Denied", - } except Exception as e: - # include ConnectionRefused and other errors - failures[destination] = { - "status": 503, "message": e.message - } + failures[destination] = _exception_to_failure(e) yield make_deferred_yieldable(defer.gatherResults([ preserve_fn(do_remote_query)(destination) @@ -252,19 +238,8 @@ class E2eKeysHandler(object): for user_id, keys in remote_result["one_time_keys"].items(): if user_id in device_keys: json_result[user_id] = keys - except CodeMessageException as e: - failures[destination] = { - "status": e.code, "message": e.message - } - except NotRetryingDestination as e: - failures[destination] = { - "status": 503, "message": "Not ready for retry", - } except Exception as e: - # include ConnectionRefused and other errors - failures[destination] = { - "status": 503, "message": e.message - } + failures[destination] = _exception_to_failure(e) yield make_deferred_yieldable(defer.gatherResults([ preserve_fn(claim_client_keys)(destination) @@ -362,6 +337,28 @@ class E2eKeysHandler(object): ) +def _exception_to_failure(e): + if isinstance(e, CodeMessageException): + return { + "status": e.code, "message": e.message, + } + + if isinstance(e, NotRetryingDestination): + return { + "status": 503, "message": "Not ready for retry", + } + + if isinstance(e, FederationDeniedError): + return { + "status": 403, "message": "Federation Denied", + } + + # include ConnectionRefused and other errors + return { + "status": 503, "message": e.message, + } + + def _one_time_keys_match(old_key_json, new_key): old_key = json.loads(old_key_json) From a134c572a6697fa6443525493e3fc13f74452d34 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 27 Mar 2018 17:15:06 +0100 Subject: [PATCH 19/96] Stringify exceptions for keys/{query,claim} Make sure we stringify any exceptions we return from keys/query and keys/claim, to avoid a 'not JSON serializable' error later Fixes #3010 --- synapse/handlers/e2e_keys.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 41521e6990..325c0c4a9f 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -354,8 +354,11 @@ def _exception_to_failure(e): } # include ConnectionRefused and other errors + # + # Note that some Exceptions (notably twisted's ResponseFailed etc) don't + # give a string for e.message, which simplejson then fails to serialize. return { - "status": 503, "message": e.message, + "status": 503, "message": str(e.message), } From ef520d8d0e152a24fb6660fdd2def214b6e9caae Mon Sep 17 00:00:00 2001 From: Michael Kaye <1917473+michaelkaye@users.noreply.github.com> Date: Tue, 27 Mar 2018 14:12:22 +0100 Subject: [PATCH 20/96] Include coarse CPU and Memory use in stats callbacks. This requires the psutil module, and is still opt-in based on the report_stats config option. 
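
The psutil calls this change depends on can be exercised standalone. A
minimal sketch of the measurements added below, mirroring the diff's shape
(a warm-up cpu_percent call, then oneshot batching); this is illustrative
only, not the Synapse code itself::

    import time

    import psutil  # optional dependency: pip install psutil

    process = psutil.Process()

    # The first cpu_percent(interval=None) call only records a reference
    # point and returns a meaningless 0.0; subsequent calls report usage
    # since the previous call.
    process.cpu_percent(interval=None)
    time.sleep(1)

    stats = {}
    with process.oneshot():  # batch the underlying process reads
        stats["memory_rss"] = process.memory_info().rss
        stats["cpu_average"] = int(process.cpu_percent(interval=None))

    print(stats)  # e.g. {'memory_rss': 14065664, 'cpu_average': 2}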
--- UPGRADE.rst | 12 ++++++++++++
 synapse/app/homeserver.py | 20 ++++++++++++++++++++
 2 files changed, 32 insertions(+)

diff --git a/UPGRADE.rst b/UPGRADE.rst
index 2efe7ea60f..f6bb1070b1 100644
--- a/UPGRADE.rst
+++ b/UPGRADE.rst
@@ -48,6 +48,18 @@ returned by the Client-Server API:
 # configured on port 443.
 curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"

+Upgrading to $NEXT_VERSION
+==========================
+
+This release expands the anonymous usage stats sent if the opt-in
+``report_stats`` configuration is set to ``true``. We now capture RSS memory
+and cpu use at a very coarse level. This requires administrators to install
+the optional ``psutil`` python module.
+
+We would appreciate it if you could assist by ensuring this module is available
+and ``report_stats`` is enabled. This will let us see if performance changes to
+synapse are having an impact on the general community.
+
 Upgrading to v0.15.0
 ====================

diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index c00afbba28..313be42ded 100755
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -401,6 +401,7 @@ def run(hs):
 start_time = clock.time()

 stats = {}
+ stats_process = None

 @defer.inlineCallbacks
 def phone_stats_home():
@@ -427,6 +428,10 @@ def run(hs):
 daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
 stats["daily_sent_messages"] = daily_sent_messages

+ if stats_process is not None:
+ with stats_process.oneshot():
+ stats["memory_rss"] = stats_process.memory_info().rss
+ stats["cpu_average"] = int(stats_process.cpu_percent(interval=None))

 logger.info("Reporting stats to matrix.org: %s" % (stats,))
 try:
@@ -438,6 +443,21 @@ def run(hs):
 logger.warn("Error reporting stats: %s", e)

 if hs.config.report_stats:
+ try:
+ import psutil
+ stats_process = psutil.Process()
+ # Ensure we can fetch both, and make the initial request for cpu_percent
+ # so the next request will use this as the initial point.
+ stats_process.memory_info().rss
+ stats_process.cpu_percent(interval=None)
+ except (ImportError, AttributeError):
+ logger.warn(
+ "report_stats enabled but psutil is not installed or incorrect version."
+ " Disabling reporting of memory/cpu stats."
+ " Ensuring psutil is available will help matrix track performance changes across releases."
+ )
+ stats_process = None
+
 logger.info("Scheduling stats reporting for 3 hour intervals")
 clock.looping_call(phone_stats_home, 3 * 60 * 60 * 1000)

From 9187e0762f0b4f028d15fac4502e458f513d6642 Mon Sep 17 00:00:00 2001
From: Neil Johnson
Date: Wed, 28 Mar 2018 10:02:32 +0100
Subject: [PATCH 21/96] count_daily_users failed if db was sqlite due to type
 failure - presumably this prevented all sqlite homeservers reporting home

---
 synapse/storage/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py
index de00cae447..b97e5e5ff4 100644
--- a/synapse/storage/__init__.py
+++ b/synapse/storage/__init__.py
@@ -260,7 +260,7 @@ class DataStore(RoomMemberStore, RoomStore,
 ) u
 """
- txn.execute(sql, (yesterday,))
+ txn.execute(sql, (str(yesterday),))

 count, = txn.fetchone()
 return count

From a32d2548d986f7075e8310184ce0b70c69513a02 Mon Sep 17 00:00:00 2001
From: Neil Johnson
Date: Wed, 28 Mar 2018 10:39:13 +0100
Subject: [PATCH 22/96] query and call for r30 stats

---
 synapse/app/homeserver.py | 2 ++
 synapse/storage/__init__.py | 36 ++++++++++++++++++++++++++++++
 2 files changed, 38 insertions(+)

diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index c00afbba28..8bce9f1ace 100755
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -425,6 +425,8 @@ def run(hs):
 stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms()
 stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()

+ stats["r30_users"] = yield hs.get_datastore().count_r30_users()
+
 daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
 stats["daily_sent_messages"] = daily_sent_messages

diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py
index b97e5e5ff4..10f99c3cd5 100644
--- a/synapse/storage/__init__.py
+++ b/synapse/storage/__init__.py
@@ -267,6 +267,42 @@ class DataStore(RoomMemberStore, RoomStore,
 ret = yield self.runInteraction("count_users", _count_users)
 defer.returnValue(ret)

+ @defer.inlineCallbacks
+ def count_r30_users(self):
+ """
+ Counts the number of 30 day retained users, defined as:
+ * Users who have created their accounts more than 30 days ago
+ * Where last seen at most 30 days ago
+ * Where account creation and last_seen are > 30 days apart
+ """
+ def _count_r30_users(txn):
+ thirty_days_in_secs = 86400 * 30
+ now = int(self._clock.time_msec())
+ thirty_days_ago_in_secs = now - thirty_days_in_secs
+
+ sql = """
+ SELECT COALESCE(count(*), 0) FROM (
+ SELECT users.name, users.creation_ts * 1000, MAX(user_ips.last_seen)
+ FROM users, user_ips
+ WHERE users.name = user_ips.user_id
+ AND appservice_id is NULL
+ AND users.creation_ts < ?
+ AND user_ips.last_seen/1000 > ?
+ AND (user_ips.last_seen/1000) - users.creation_ts > ?
+ GROUP BY users.name, users.creation_ts
+ ) u
+ """
+
+ txn.execute(sql, (thirty_days_ago_in_secs,
+ thirty_days_ago_in_secs,
+ thirty_days_in_secs))
+
+ count, = txn.fetchone()
+ return count
+
+ ret = yield self.runInteraction("count_r30_users", _count_r30_users)
+ defer.returnValue(ret)
+
 def get_users(self):
 """Function to reterive a list of users in users table.
From a9cb1a35c85f62bb0114dabd62d118c80d66e415 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 28 Mar 2018 10:57:27 +0100 Subject: [PATCH 23/96] fix tests/storage/test_user_directory.py --- tests/storage/test_user_directory.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py index 0891308f25..88add45217 100644 --- a/tests/storage/test_user_directory.py +++ b/tests/storage/test_user_directory.py @@ -62,7 +62,7 @@ class UserDirectoryStoreTestCase(unittest.TestCase): self.assertFalse(r["limited"]) self.assertEqual(1, len(r["results"])) self.assertDictEqual(r["results"][0], { - "user_id": BOB, + "d.user_id": BOB, "display_name": "bob", "avatar_url": None, }) From 01ccc9e6f25a87d7906d7907afd9e8527228215b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 28 Mar 2018 11:03:52 +0100 Subject: [PATCH 24/96] Measure time it takes to calculate state group ID --- synapse/state.py | 51 ++++++++++++++++++++++++------------------------ 1 file changed, 26 insertions(+), 25 deletions(-) diff --git a/synapse/state.py b/synapse/state.py index a7f20350f1..26093c8434 100644 --- a/synapse/state.py +++ b/synapse/state.py @@ -483,33 +483,34 @@ class StateResolutionHandler(object): key: e_ids.pop() for key, e_ids in state.iteritems() } - # if the new state matches any of the input state groups, we can - # use that state group again. Otherwise we will generate a state_id - # which will be used as a cache key for future resolutions, but - # not get persisted. - state_group = None - new_state_event_ids = frozenset(new_state.itervalues()) - for sg, events in state_groups_ids.iteritems(): - if new_state_event_ids == frozenset(e_id for e_id in events): - state_group = sg - break + with Measure(self.clock, "state.create_group_ids"): + # if the new state matches any of the input state groups, we can + # use that state group again. Otherwise we will generate a state_id + # which will be used as a cache key for future resolutions, but + # not get persisted. 
+ state_group = None + new_state_event_ids = frozenset(new_state.itervalues()) + for sg, events in state_groups_ids.iteritems(): + if new_state_event_ids == frozenset(e_id for e_id in events): + state_group = sg + break - # TODO: We want to create a state group for this set of events, to - # increase cache hits, but we need to make sure that it doesn't - # end up as a prev_group without being added to the database + # TODO: We want to create a state group for this set of events, to + # increase cache hits, but we need to make sure that it doesn't + # end up as a prev_group without being added to the database - prev_group = None - delta_ids = None - for old_group, old_ids in state_groups_ids.iteritems(): - if not set(new_state) - set(old_ids): - n_delta_ids = { - k: v - for k, v in new_state.iteritems() - if old_ids.get(k) != v - } - if not delta_ids or len(n_delta_ids) < len(delta_ids): - prev_group = old_group - delta_ids = n_delta_ids + prev_group = None + delta_ids = None + for old_group, old_ids in state_groups_ids.iteritems(): + if not set(new_state) - set(old_ids): + n_delta_ids = { + k: v + for k, v in new_state.iteritems() + if old_ids.get(k) != v + } + if not delta_ids or len(n_delta_ids) < len(delta_ids): + prev_group = old_group + delta_ids = n_delta_ids cache = _StateCacheEntry( state=new_state, From 545001b9e4b1d6710145d3efe2117fbdf823fb38 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 28 Mar 2018 11:19:45 +0100 Subject: [PATCH 25/96] Fix search_user_dir multiple sqlite versions do different things --- synapse/storage/user_directory.py | 4 ++-- tests/storage/test_user_directory.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/storage/user_directory.py b/synapse/storage/user_directory.py index dfdcbb3181..d6e289ffbe 100644 --- a/synapse/storage/user_directory.py +++ b/synapse/storage/user_directory.py @@ -667,7 +667,7 @@ class UserDirectoryStore(SQLBaseStore): # The array of numbers are the weights for the various part of the # search: (domain, _, display name, localpart) sql = """ - SELECT d.user_id, display_name, avatar_url + SELECT d.user_id AS user_id, display_name, avatar_url FROM user_directory_search INNER JOIN user_directory AS d USING (user_id) %s @@ -702,7 +702,7 @@ class UserDirectoryStore(SQLBaseStore): search_query = _parse_query_sqlite(search_term) sql = """ - SELECT d.user_id, display_name, avatar_url + SELECT d.user_id AS user_id, display_name, avatar_url FROM user_directory_search INNER JOIN user_directory AS d USING (user_id) %s diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py index 88add45217..0891308f25 100644 --- a/tests/storage/test_user_directory.py +++ b/tests/storage/test_user_directory.py @@ -62,7 +62,7 @@ class UserDirectoryStoreTestCase(unittest.TestCase): self.assertFalse(r["limited"]) self.assertEqual(1, len(r["results"])) self.assertDictEqual(r["results"][0], { - "d.user_id": BOB, + "user_id": BOB, "display_name": "bob", "avatar_url": None, }) From 0f890f477eb2ed03b8fd48710d1960210f44a334 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 28 Mar 2018 11:49:57 +0100 Subject: [PATCH 26/96] No need to cast in count_daily_users --- synapse/storage/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 10f99c3cd5..ba43b2d8ec 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -250,7 +250,7 @@ class DataStore(RoomMemberStore, RoomStore, Counts the number of 
users who used this homeserver in the last 24 hours. """ def _count_users(txn): - yesterday = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24), + yesterday = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24) sql = """ SELECT COALESCE(count(*), 0) FROM ( @@ -260,7 +260,7 @@ class DataStore(RoomMemberStore, RoomStore, ) u """ - txn.execute(sql, (str(yesterday),)) + txn.execute(sql, (yesterday,)) count, = txn.fetchone() return count From 788e69098c93f2433ef907015666c624bb39318f Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 28 Mar 2018 12:03:13 +0100 Subject: [PATCH 27/96] Add user_ips last seen index --- synapse/storage/client_ips.py | 7 +++++++ .../delta/48/add_user_ips_last_seen_index.sql | 17 +++++++++++++++++ 2 files changed, 24 insertions(+) create mode 100644 synapse/storage/schema/delta/48/add_user_ips_last_seen_index.sql diff --git a/synapse/storage/client_ips.py b/synapse/storage/client_ips.py index a03d1d6104..7b44dae0fc 100644 --- a/synapse/storage/client_ips.py +++ b/synapse/storage/client_ips.py @@ -48,6 +48,13 @@ class ClientIpStore(background_updates.BackgroundUpdateStore): columns=["user_id", "device_id", "last_seen"], ) + self.register_background_index_update( + "user_ips_last_seen_index", + index_name="user_ips_last_seen", + table="user_ips", + columns=["user_id", "last_seen"], + ) + # (user_id, access_token, ip) -> (user_agent, device_id, last_seen) self._batch_row_update = {} diff --git a/synapse/storage/schema/delta/48/add_user_ips_last_seen_index.sql b/synapse/storage/schema/delta/48/add_user_ips_last_seen_index.sql new file mode 100644 index 0000000000..9248b0b24a --- /dev/null +++ b/synapse/storage/schema/delta/48/add_user_ips_last_seen_index.sql @@ -0,0 +1,17 @@ +/* Copyright 2018 New Vector Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +INSERT into background_updates (update_name, progress_json) + VALUES ('user_ips_last_seen_index', '{}'); From 4ceaa7433a324afab23c4a445cabe3da965e5846 Mon Sep 17 00:00:00 2001 From: Michael Kaye <1917473+michaelkaye@users.noreply.github.com> Date: Wed, 28 Mar 2018 12:08:09 +0100 Subject: [PATCH 28/96] As daemonizing will make a new process, defer call to init. 
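
The underlying problem: daemonizing forks, so a psutil.Process() handle
created before the fork stays bound to the parent's PID rather than the
daemon's. A minimal POSIX-only illustration of that pitfall, assuming
nothing about Synapse's actual daemonize path::

    import os

    pid_before_fork = os.getpid()

    child = os.fork()
    if child == 0:
        # The "daemon" half: its PID differs from the one captured above, so
        # a psutil.Process(pid_before_fork) handle created earlier would be
        # tracking the wrong (soon-to-exit) process.
        print("daemon pid %d, pre-fork pid %d" % (os.getpid(), pid_before_fork))
        os._exit(0)

    os.waitpid(child, 0)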
--- synapse/app/homeserver.py | 36 +++++++++++++++++++++++-------------
 1 file changed, 23 insertions(+), 13 deletions(-)

diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 313be42ded..0737945ede 100755
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -401,7 +401,7 @@ def run(hs):
 start_time = clock.time()

 stats = {}
- stats_process = None
+ stats_process = []

 @defer.inlineCallbacks
 def phone_stats_home():
@@ -428,10 +428,13 @@ def run(hs):
 daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
 stats["daily_sent_messages"] = daily_sent_messages

- if stats_process is not None:
- with stats_process.oneshot():
- stats["memory_rss"] = stats_process.memory_info().rss
- stats["cpu_average"] = int(stats_process.cpu_percent(interval=None))
+ if len(stats_process) > 0:
+ stats["memory_rss"] = 0
+ stats["cpu_average"] = 0
+ for process in stats_process:
+ with process.oneshot():
+ stats["memory_rss"] += process.memory_info().rss
+ stats["cpu_average"] += int(process.cpu_percent(interval=None))

 logger.info("Reporting stats to matrix.org: %s" % (stats,))
 try:
@@ -442,25 +445,32 @@ def run(hs):
 except Exception as e:
 logger.warn("Error reporting stats: %s", e)

- if hs.config.report_stats:
+ def performance_stats_init():
 try:
 import psutil
- stats_process = psutil.Process()
+ process = psutil.Process()
 # Ensure we can fetch both, and make the initial request for cpu_percent
 # so the next request will use this as the initial point.
- stats_process.memory_info().rss
- stats_process.cpu_percent(interval=None)
+ process.memory_info().rss
+ process.cpu_percent(interval=None)
+ logger.info("report_stats can use psutil")
+ stats_process.append(process)
 except (ImportError, AttributeError):
 logger.warn(
- "report_stats enabled but psutil is not installed or incorrect version."
- " Disabling reporting of memory/cpu stats."
- " Ensuring psutil is available will help matrix track performance changes across releases."
+ "report_stats enabled but psutil is not installed or incorrect version."
+ " Disabling reporting of memory/cpu stats."
+ " Ensuring psutil is available will help matrix track performance changes"
+ " across releases."
) - stats_process = None + if hs.config.report_stats: logger.info("Scheduling stats reporting for 3 hour intervals") clock.looping_call(phone_stats_home, 3 * 60 * 60 * 1000) + # We need to defer this init for the cases that we daemonize + # otherwise the process ID we get is that of the non-daemon process + clock.call_later(15, performance_stats_init) + # We wait 5 minutes to send the first set of stats as the server can # be quite busy the first few minutes clock.call_later(5 * 60, phone_stats_home) From 792d340572026becf48fe73421f0b73cf575fe46 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 28 Mar 2018 12:25:02 +0100 Subject: [PATCH 29/96] rename stat to future proof --- synapse/app/homeserver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 8bce9f1ace..286f4dcf7b 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -425,7 +425,7 @@ def run(hs): stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms() stats["daily_messages"] = yield hs.get_datastore().count_daily_messages() - stats["r30_users"] = yield hs.get_datastore().count_r30_users() + stats["r30_users_all"] = yield hs.get_datastore().count_r30_users() daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages() stats["daily_sent_messages"] = daily_sent_messages From 79452edeee94a09a826ee2b41a08811b823a3ad6 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 28 Mar 2018 14:03:37 +0100 Subject: [PATCH 30/96] Add joinability for groups Adds API to set the 'joinable' flag, and corresponding flag in the table. --- synapse/federation/transport/client.py | 17 ++++++++++++++++ synapse/federation/transport/server.py | 20 ++++++++++++++++++ synapse/groups/groups_server.py | 19 +++++++++++++++++ synapse/handlers/groups_local.py | 3 +++ synapse/rest/client/v2_alpha/groups.py | 28 ++++++++++++++++++++++++++ synapse/storage/group_server.py | 13 ++++++++++++ synapse/storage/prepare_database.py | 3 ++- 7 files changed, 102 insertions(+), 1 deletion(-) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 5488e82985..46a797b4ba 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -856,6 +857,22 @@ class TransportLayerClient(object):
 ignore_backoff=True,
 )

+ @log_function
+ def set_group_joinable(self, destination, group_id, requester_user_id,
+ content):
+ """Sets whether a group is joinable without an invite or knock
+ """
+ path = PREFIX + "/groups/%s/joinable" % (group_id,)
+
+ return self.client.post_json(
+ destination=destination,
+ path=path,
+ args={"requester_user_id": requester_user_id},
+ data=content,
+ ignore_backoff=True,
+ )
+
+
 @log_function
 def delete_group_summary_user(self, destination, group_id, requester_user_id,
 user_id, role_id):
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index a66a6b0692..107deb4e1e 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1124,6 +1125,24 @@ class FederationGroupsBulkPublicisedServlet(BaseFederationServlet):
 defer.returnValue((200, resp))

+class FederationGroupsJoinableServlet(BaseFederationServlet):
+ """Sets whether a group is joinable without an invite or knock
+ """
+ PATH = "/groups/(?P<group_id>[^/]*)/joinable$"
+
+ @defer.inlineCallbacks
+ def on_POST(self, origin, content, query, group_id):
+ requester_user_id = parse_string_from_args(query, "requester_user_id")
+ if get_domain_from_id(requester_user_id) != origin:
+ raise SynapseError(403, "requester_user_id doesn't match origin")
+
+ new_content = yield self.handler.set_group_joinable(
+ group_id, requester_user_id, content
+ )
+
+ defer.returnValue((200, new_content))
+
+
 FEDERATION_SERVLET_CLASSES = (
 FederationSendServlet,
 FederationPullServlet,
@@ -1172,6 +1191,7 @@ GROUP_SERVER_SERVLET_CLASSES = (
 FederationGroupsSummaryUsersServlet,
 FederationGroupsAddRoomsServlet,
 FederationGroupsAddRoomsConfigServlet,
+ FederationGroupsJoinableServlet,
 )

diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py
index 0b995aed70..25cbfb1691 100644
--- a/synapse/groups/groups_server.py
+++ b/synapse/groups/groups_server.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2017 Vector Creations Ltd
+# Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -205,6 +206,24 @@ class GroupsServerHandler(object): defer.returnValue({}) + @defer.inlineCallbacks + def set_group_joinable(self, group_id, requester_user_id, content): + """Sets whether a group is joinable without an invite or knock + """ + yield self.check_group_is_ours( + group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id + ) + + is_joinable = content.get('joinable') + if is_joinable is None: + raise SynapseError( + 400, "No value specified for 'joinable'" + ) + + yield self.store.set_group_joinable(group_id, is_joinable=is_joinable) + + defer.returnValue({}) + @defer.inlineCallbacks def get_group_categories(self, group_id, requester_user_id): """Get all categories in a group (as seen by user) diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index e4d0cc8b02..c9671b9046 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -90,6 +91,8 @@ class GroupsLocalHandler(object): get_group_role = _create_rerouter("get_group_role") get_group_roles = _create_rerouter("get_group_roles") + set_group_joinable = _create_rerouter("set_group_joinable") + @defer.inlineCallbacks def get_group_summary(self, group_id, requester_user_id): """Get the group summary for a group. diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index f762dbfa9a..dc8247d172 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -401,6 +402,32 @@ class GroupInvitedUsersServlet(RestServlet):
         defer.returnValue((200, result))
 
 
+class GroupJoinableServlet(RestServlet):
+    """Set whether a group is joinable without an invite
+    """
+    PATTERNS = client_v2_patterns("/groups/(?P<group_id>[^/]*)/joinable$")
+
+    def __init__(self, hs):
+        super(GroupJoinableServlet, self).__init__()
+        self.auth = hs.get_auth()
+        self.groups_handler = hs.get_groups_local_handler()
+
+    @defer.inlineCallbacks
+    def on_POST(self, request, group_id):
+        requester = yield self.auth.get_user_by_req(request)
+        requester_user_id = requester.user.to_string()
+
+        content = parse_json_object_from_request(request)
+
+        result = yield self.groups_handler.set_group_joinable(
+            group_id,
+            requester_user_id,
+            content,
+        )
+
+        defer.returnValue((200, result))
+
+
 class GroupCreateServlet(RestServlet):
     """Create a group
     """
@@ -738,6 +765,7 @@ def register_servlets(hs, http_server):
     GroupInvitedUsersServlet(hs).register(http_server)
     GroupUsersServlet(hs).register(http_server)
     GroupRoomServlet(hs).register(http_server)
+    GroupJoinableServlet(hs).register(http_server)
     GroupCreateServlet(hs).register(http_server)
     GroupAdminRoomsServlet(hs).register(http_server)
     GroupAdminRoomsConfigServlet(hs).register(http_server)
diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py
index 8fde1aab8e..96553d4fb1 100644
--- a/synapse/storage/group_server.py
+++ b/synapse/storage/group_server.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2017 Vector Creations Ltd
+# Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -29,6 +30,18 @@ _DEFAULT_ROLE_ID = ""
 
 
 class GroupServerStore(SQLBaseStore):
+    def set_group_joinable(self, group_id, is_joinable):
+        return self._simple_update_one(
+            table="groups",
+            keyvalues={
+                "group_id": group_id,
+            },
+            updatevalues={
+                "is_joinable": is_joinable,
+            },
+            desc="set_group_joinable",
+        )
+
     def get_group(self, group_id):
         return self._simple_select_one(
             table="groups",
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index c845a0cec5..04411a665f 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014 - 2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -25,7 +26,7 @@ logger = logging.getLogger(__name__)
 
 # Remember to update this number every time a change is made to database
 # schema files, so the users will be informed on server restarts.
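 # (Bumping the constant below to 48 causes any delta files under
 # synapse/storage/schema/delta/48/ to be applied to existing databases at
 # the next startup.)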
-SCHEMA_VERSION = 47 +SCHEMA_VERSION = 48 dir_path = os.path.abspath(os.path.dirname(__file__)) From 352e1ff9ed945fd7f2655bf47d591184fc980afb Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 28 Mar 2018 14:07:57 +0100 Subject: [PATCH 31/96] Add schema delta file --- .../storage/schema/delta/48/groups_joinable.sql | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 synapse/storage/schema/delta/48/groups_joinable.sql diff --git a/synapse/storage/schema/delta/48/groups_joinable.sql b/synapse/storage/schema/delta/48/groups_joinable.sql new file mode 100644 index 0000000000..fb9c7a8d1c --- /dev/null +++ b/synapse/storage/schema/delta/48/groups_joinable.sql @@ -0,0 +1,16 @@ +/* Copyright 2018 New Vector Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +ALTER TABLE groups ADD COLUMN is_joinable BOOLEAN NOT NULL DEFAULT 0; From a1642708331ef64e38f4d2708cee9eefbc3d391e Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 28 Mar 2018 14:23:00 +0100 Subject: [PATCH 32/96] Make column definition that works on both dbs --- synapse/storage/schema/delta/48/groups_joinable.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/schema/delta/48/groups_joinable.sql b/synapse/storage/schema/delta/48/groups_joinable.sql index fb9c7a8d1c..39c8fed46c 100644 --- a/synapse/storage/schema/delta/48/groups_joinable.sql +++ b/synapse/storage/schema/delta/48/groups_joinable.sql @@ -13,4 +13,4 @@ * limitations under the License. 
*/ -ALTER TABLE groups ADD COLUMN is_joinable BOOLEAN NOT NULL DEFAULT 0; +ALTER TABLE groups ADD COLUMN is_joinable BOOLEAN NOT NULL DEFAULT (CAST(0 AS BOOLEAN)); From 33f6195d9ae91520aee9d108d60245b5265ac714 Mon Sep 17 00:00:00 2001 From: Michael Kaye <1917473+michaelkaye@users.noreply.github.com> Date: Wed, 28 Mar 2018 14:25:25 +0100 Subject: [PATCH 33/96] Handle review comments --- synapse/app/homeserver.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 0737945ede..b935beb974 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -401,6 +401,9 @@ def run(hs): start_time = clock.time() stats = {} + + # Contains the list of processes we will be monitoring + # currently either 0 or 1 stats_process = [] @defer.inlineCallbacks @@ -428,13 +431,13 @@ def run(hs): daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages() stats["daily_sent_messages"] = daily_sent_messages + if len(stats_process) > 0: stats["memory_rss"] = 0 stats["cpu_average"] = 0 for process in stats_process: - with process.oneshot(): - stats["memory_rss"] += process.memory_info().rss - stats["cpu_average"] += int(process.cpu_percent(interval=None)) + stats["memory_rss"] += process.memory_info().rss + stats["cpu_average"] += int(process.cpu_percent(interval=None)) logger.info("Reporting stats to matrix.org: %s" % (stats,)) try: @@ -459,8 +462,8 @@ def run(hs): logger.warn( "report_stats enabled but psutil is not installed or incorrect version." " Disabling reporting of memory/cpu stats." - " Ensuring psutil is available will help matrix track performance changes" - " across releases." + " Ensuring psutil is available will help matrix.org track performance" + " changes across releases." 
) if hs.config.report_stats: @@ -469,7 +472,7 @@ def run(hs): # We need to defer this init for the cases that we daemonize # otherwise the process ID we get is that of the non-daemon process - clock.call_later(15, performance_stats_init) + clock.call_later(0, performance_stats_init) # We wait 5 minutes to send the first set of stats as the server can # be quite busy the first few minutes From 32260baa410e1ae8200f636861a57bf2039e2cf0 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 28 Mar 2018 14:29:42 +0100 Subject: [PATCH 34/96] pep8 --- synapse/federation/transport/client.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 46a797b4ba..5a6b63350b 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -859,7 +859,7 @@ class TransportLayerClient(object): @log_function def set_group_joinable(self, destination, group_id, requester_user_id, - content): + content): """Sets whether a group is joinable without an invite or knock """ path = PREFIX + "/groups/%s/joinable" % (group_id,) @@ -872,7 +872,6 @@ class TransportLayerClient(object): ignore_backoff=True, ) - @log_function def delete_group_summary_user(self, destination, group_id, requester_user_id, user_id, role_id): From 86932be2cb1837688d154ff78fb6418f78483133 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 28 Mar 2018 14:36:53 +0100 Subject: [PATCH 35/96] Support multi client R30 for psql --- synapse/app/homeserver.py | 4 +++- synapse/storage/__init__.py | 34 +++++++++++++++++++++++++++------- 2 files changed, 30 insertions(+), 8 deletions(-) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 286f4dcf7b..35e2b00f1b 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -425,7 +425,9 @@ def run(hs): stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms() stats["daily_messages"] = yield hs.get_datastore().count_daily_messages() - stats["r30_users_all"] = yield hs.get_datastore().count_r30_users() + r30_results = yield hs.get_datastore().count_r30_users() + for name, count in r30_results.items(): + stats["r30_users_" + name] = count daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages() stats["daily_sent_messages"] = daily_sent_messages diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index ba43b2d8ec..b651973c79 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -280,6 +280,15 @@ class DataStore(RoomMemberStore, RoomStore, now = int(self._clock.time_msec()) thirty_days_ago_in_secs = now - thirty_days_in_secs + # Are these filters sufficiently robust? + filters = { + "ALL": "", + "IOS": "^(Vector|Riot|Riot\.im)\/.* iOS", + "ANDROID": "^(Dalvik|Riot|Riot\.im)\/.* Android", + "ELECTRON": "Electron", + "WEB": "(Gecko|Mozilla)", + } + sql = """ SELECT COALESCE(count(*), 0) FROM ( SELECT users.name, users.creation_ts * 1000, MAX(user_ips.last_seen) @@ -289,16 +298,27 @@ class DataStore(RoomMemberStore, RoomStore, AND users.creation_ts < ? AND user_ips.last_seen/1000 > ? AND (user_ips.last_seen/1000) - users.creation_ts > ? - GROUP BY users.name, users.creation_ts - ) u """ - txn.execute(sql, (thirty_days_ago_in_secs, - thirty_days_ago_in_secs, - thirty_days_in_secs)) + if isinstance(self.database_engine, PostgresEngine): + sql = sql + "AND user_ips.user_agent ~ ? 
" + sql = sql + "GROUP BY users.name, users.creation_ts ) u" - count, = txn.fetchone() - return count + results = {} + if isinstance(self.database_engine, PostgresEngine): + for filter_name, user_agent_filter in filters.items(): + txn.execute(sql, (thirty_days_ago_in_secs, + thirty_days_ago_in_secs, + thirty_days_in_secs, + user_agent_filter)) + results[filter_name], = txn.fetchone() + + else: + txn.execute(sql, (thirty_days_ago_in_secs, + thirty_days_ago_in_secs, + thirty_days_in_secs)) + results["ALL"], = txn.fetchone() + return results ret = yield self.runInteraction("count_r30_users", _count_r30_users) defer.returnValue(ret) From 4262aba17b643bc82c5cce92298dac0a27b2727c Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 28 Mar 2018 14:40:03 +0100 Subject: [PATCH 36/96] bump schema version --- synapse/storage/prepare_database.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index c845a0cec5..68675e15d2 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -25,7 +25,7 @@ logger = logging.getLogger(__name__) # Remember to update this number every time a change is made to database # schema files, so the users will be informed on server restarts. -SCHEMA_VERSION = 47 +SCHEMA_VERSION = 48 dir_path = os.path.abspath(os.path.dirname(__file__)) From a838444a70195588de55a514524c4af720099177 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 28 Mar 2018 14:50:30 +0100 Subject: [PATCH 37/96] Grr. Copy the definition from is_admin --- synapse/storage/schema/delta/48/groups_joinable.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/schema/delta/48/groups_joinable.sql b/synapse/storage/schema/delta/48/groups_joinable.sql index 39c8fed46c..9e106e909c 100644 --- a/synapse/storage/schema/delta/48/groups_joinable.sql +++ b/synapse/storage/schema/delta/48/groups_joinable.sql @@ -13,4 +13,4 @@ * limitations under the License. */ -ALTER TABLE groups ADD COLUMN is_joinable BOOLEAN NOT NULL DEFAULT (CAST(0 AS BOOLEAN)); +ALTER TABLE groups ADD COLUMN is_joinable BOOL DEFAULT 0 NOT NULL; From 929b34963d320f571512453dac980ef235914956 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 28 Mar 2018 14:53:55 +0100 Subject: [PATCH 38/96] OK, smallint it is then --- synapse/storage/schema/delta/48/groups_joinable.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/schema/delta/48/groups_joinable.sql b/synapse/storage/schema/delta/48/groups_joinable.sql index 9e106e909c..ace7d0a723 100644 --- a/synapse/storage/schema/delta/48/groups_joinable.sql +++ b/synapse/storage/schema/delta/48/groups_joinable.sql @@ -13,4 +13,4 @@ * limitations under the License. 
*/ -ALTER TABLE groups ADD COLUMN is_joinable BOOL DEFAULT 0 NOT NULL; +ALTER TABLE groups ADD COLUMN is_joinable SMALLINT DEFAULT 0 NOT NULL; From 241e4e86873d5880f564791e3768247fa55c3fa8 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 28 Mar 2018 16:25:53 +0100 Subject: [PATCH 39/96] remove twisted deferral cruft --- synapse/storage/__init__.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index b651973c79..b2b85e266d 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -244,7 +244,6 @@ class DataStore(RoomMemberStore, RoomStore, return [UserPresenceState(**row) for row in rows] - @defer.inlineCallbacks def count_daily_users(self): """ Counts the number of users who used this homeserver in the last 24 hours. @@ -264,10 +263,9 @@ class DataStore(RoomMemberStore, RoomStore, count, = txn.fetchone() return count - ret = yield self.runInteraction("count_users", _count_users) - defer.returnValue(ret) + return self.runInteraction("count_users", _count_users) + - @defer.inlineCallbacks def count_r30_users(self): """ Counts the number of 30 day retained users, defined as:- @@ -320,8 +318,7 @@ class DataStore(RoomMemberStore, RoomStore, results["ALL"], = txn.fetchone() return results - ret = yield self.runInteraction("count_r30_users", _count_r30_users) - defer.returnValue(ret) + return self.runInteraction("count_r30_users", _count_r30_users) def get_users(self): """Function to reterive a list of users in users table. From c5de6987c210cce906cf279d85cbd98cd14bfc52 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 28 Mar 2018 16:44:11 +0100 Subject: [PATCH 40/96] This should probably be a PUT --- synapse/rest/client/v2_alpha/groups.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index dc8247d172..aa94130e57 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -413,7 +413,7 @@ class GroupJoinableServlet(RestServlet): self.groups_handler = hs.get_groups_local_handler() @defer.inlineCallbacks - def on_POST(self, request, group_id): + def on_PUT(self, request, group_id): requester = yield self.auth.get_user_by_req(request) requester_user_id = requester.user.to_string() From dc7c020b33dc9606089fa66fdec2dacb7f807f6d Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 28 Mar 2018 17:25:15 +0100 Subject: [PATCH 41/96] fix pep8 errors --- synapse/storage/__init__.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index b2b85e266d..70c6171404 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -14,8 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from twisted.internet import defer
-
 from synapse.storage.devices import DeviceStore
 from .appservice import (
     ApplicationServiceStore, ApplicationServiceTransactionStore
 )
@@ -265,7 +263,6 @@ class DataStore(RoomMemberStore, RoomStore,
 
         return self.runInteraction("count_users", _count_users)
 
-
     def count_r30_users(self):
         """
         Counts the number of 30 day retained users, defined as:-

From 9ee44a372d4fcf6a461b610230a285610613e8ac Mon Sep 17 00:00:00 2001
From: Neil Johnson
Date: Thu, 29 Mar 2018 16:45:34 +0100
Subject: [PATCH 42/96] Remove need for sqlite specific query

---
 synapse/storage/__init__.py | 85 ++++++++++++++++++++++++-------------
 1 file changed, 56 insertions(+), 29 deletions(-)

diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py
index 70c6171404..0b4693041f 100644
--- a/synapse/storage/__init__.py
+++ b/synapse/storage/__init__.py
@@ -269,50 +269,77 @@ class DataStore(RoomMemberStore, RoomStore,
          * Users who have created their accounts more than 30 days ago
          * Where last seen at most 30 days ago
          * Where account creation and last_seen are > 30 days apart
+
+        Returns counts globally, as well as broken down by platform
         """
         def _count_r30_users(txn):
             thirty_days_in_secs = 86400 * 30
             now = int(self._clock.time_msec())
             thirty_days_ago_in_secs = now - thirty_days_in_secs
 
-            # Are these filters sufficiently robust?
-            filters = {
-                "ALL": "",
-                "IOS": "^(Vector|Riot|Riot\.im)\/.* iOS",
-                "ANDROID": "^(Dalvik|Riot|Riot\.im)\/.* Android",
-                "ELECTRON": "Electron",
-                "WEB": "(Gecko|Mozilla)",
-            }
+            sql = """
+                SELECT platform, COALESCE(count(*), 0) FROM (
+                    SELECT users.name, platform, users.creation_ts * 1000, MAX(uip.last_seen)
+                    FROM users
+                    INNER JOIN (
+                        SELECT
+                        user_id,
+                        last_seen,
+                        CASE
+                            WHEN user_agent LIKE '%Android%' THEN 'android'
+                            WHEN user_agent LIKE '%iOS%' THEN 'ios'
+                            WHEN user_agent LIKE '%Electron%' THEN 'electron'
+                            WHEN user_agent LIKE '%Mozilla%' THEN 'web'
+                            WHEN user_agent LIKE '%Gecko%' THEN 'web'
+                            ELSE 'unknown'
+                        END
+                        AS platform
+                        FROM user_ips
+                    ) uip
+                    ON users.name = uip.user_id
+                    AND users.appservice_id is NULL
+                    AND users.creation_ts < ?
+                    AND uip.last_seen/1000 > ?
+                    AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30
+                    GROUP BY users.name, platform, users.creation_ts
+                ) u GROUP BY platform
+            """
+
+            results = {}
+            txn.execute(sql, (thirty_days_ago_in_secs,
+                              thirty_days_ago_in_secs))
+            rows = txn.fetchall()
+            for row in rows:
+                # the catch-all 'unknown' bucket is not reported per-platform;
+                # those users are still included in the 'all' count below
+                if row[0] == 'unknown':
+                    continue
+                results[row[0]] = row[1]
 
             sql = """
                 SELECT COALESCE(count(*), 0) FROM (
-                    SELECT users.name, users.creation_ts * 1000, MAX(user_ips.last_seen)
-                    FROM users, user_ips
-                    WHERE users.name = user_ips.user_id
+                    SELECT users.name, users.creation_ts * 1000, MAX(uip.last_seen)
+                    FROM users
+                    INNER JOIN (
+                        SELECT
+                        user_id,
+                        last_seen
+                        FROM user_ips
+                    ) uip
+                    ON users.name = uip.user_id
                     AND appservice_id is NULL
                     AND users.creation_ts < ?
-                    AND user_ips.last_seen/1000 > ?
-                    AND (user_ips.last_seen/1000) - users.creation_ts > ?
+                    AND uip.last_seen/1000 > ?
+                    AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30
+                    GROUP BY users.name, users.creation_ts
+                ) u
             """
 
-            if isinstance(self.database_engine, PostgresEngine):
-                sql = sql + "AND user_ips.user_agent ~ ? 
" - sql = sql + "GROUP BY users.name, users.creation_ts ) u" + txn.execute(sql, (thirty_days_ago_in_secs, + thirty_days_ago_in_secs)) - results = {} - if isinstance(self.database_engine, PostgresEngine): - for filter_name, user_agent_filter in filters.items(): - txn.execute(sql, (thirty_days_ago_in_secs, - thirty_days_ago_in_secs, - thirty_days_in_secs, - user_agent_filter)) - results[filter_name], = txn.fetchone() + count, = txn.fetchone() + results['all'] = count - else: - txn.execute(sql, (thirty_days_ago_in_secs, - thirty_days_ago_in_secs, - thirty_days_in_secs)) - results["ALL"], = txn.fetchone() return results return self.runInteraction("count_r30_users", _count_r30_users) From b4e37c6f50b91dd0ea90c773185884659e3a738a Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 29 Mar 2018 17:27:39 +0100 Subject: [PATCH 43/96] pep8 --- synapse/storage/__init__.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 0b4693041f..f68e436df0 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -280,7 +280,8 @@ class DataStore(RoomMemberStore, RoomStore, sql = """ SELECT platform, COALESCE(count(*), 0) FROM ( - SELECT users.name, platform, users.creation_ts * 1000, MAX(uip.last_seen) + SELECT users.name, platform, users.creation_ts * 1000, + MAX(uip.last_seen) FROM users INNER JOIN ( SELECT @@ -317,7 +318,8 @@ class DataStore(RoomMemberStore, RoomStore, sql = """ SELECT COALESCE(count(*), 0) FROM ( - SELECT users.name, users.creation_ts * 1000, MAX(uip.last_seen) + SELECT users.name, users.creation_ts * 1000, + MAX(uip.last_seen) FROM users INNER JOIN ( SELECT From fcfe7f6ad3a2a9c285ac96008395fc47e096ff4b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 29 Mar 2018 22:45:52 +0100 Subject: [PATCH 44/96] Use simplejson throughout Let's use simplejson rather than json, for consistency. --- synapse/api/errors.py | 3 ++- synapse/handlers/identity.py | 8 +++++--- synapse/storage/schema/delta/14/upgrade_appservice_db.py | 3 ++- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/synapse/api/errors.py b/synapse/api/errors.py index aa15f73f36..bee59e80dd 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -15,9 +15,10 @@ """Contains exceptions and error codes.""" -import json import logging +import simplejson as json + logger = logging.getLogger(__name__) diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 9efcdff1d6..91a0898860 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -15,6 +15,11 @@ # limitations under the License. """Utilities for interacting with Identity Servers""" + +import logging + +import simplejson as json + from twisted.internet import defer from synapse.api.errors import ( @@ -24,9 +29,6 @@ from ._base import BaseHandler from synapse.util.async import run_on_reactor from synapse.api.errors import SynapseError, Codes -import json -import logging - logger = logging.getLogger(__name__) diff --git a/synapse/storage/schema/delta/14/upgrade_appservice_db.py b/synapse/storage/schema/delta/14/upgrade_appservice_db.py index 8755bb2e49..4d725b92fe 100644 --- a/synapse/storage/schema/delta/14/upgrade_appservice_db.py +++ b/synapse/storage/schema/delta/14/upgrade_appservice_db.py @@ -12,9 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import json import logging +import simplejson as json + logger = logging.getLogger(__name__) From 05630758f25d958bf60fde4df5f80a89e4a9a0ac Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 29 Mar 2018 22:57:28 +0100 Subject: [PATCH 45/96] Use static JSONEncoders using json.dumps with custom options requires us to create a new JSONEncoder on each call. It's more efficient to create one upfront and reuse it. --- synapse/handlers/message.py | 4 ++-- synapse/replication/tcp/commands.py | 8 +++++--- synapse/storage/events.py | 23 ++++++++--------------- synapse/util/frozenutils.py | 19 +++++++++++++++++++ 4 files changed, 34 insertions(+), 20 deletions(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 5a8ddc253e..6de6e13b7b 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -27,7 +27,7 @@ from synapse.types import ( from synapse.util.async import run_on_reactor, ReadWriteLock, Limiter from synapse.util.logcontext import preserve_fn, run_in_background from synapse.util.metrics import measure_func -from synapse.util.frozenutils import unfreeze +from synapse.util.frozenutils import frozendict_json_encoder from synapse.util.stringutils import random_string from synapse.visibility import filter_events_for_client from synapse.replication.http.send_event import send_event_to_master @@ -678,7 +678,7 @@ class EventCreationHandler(object): # Ensure that we can round trip before trying to persist in db try: - dump = simplejson.dumps(unfreeze(event.content)) + dump = frozendict_json_encoder.encode(event.content) simplejson.loads(dump) except Exception: logger.exception("Failed to encode content: %r", event.content) diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py index 0005ad5879..34bcf903a3 100644 --- a/synapse/replication/tcp/commands.py +++ b/synapse/replication/tcp/commands.py @@ -24,6 +24,8 @@ import simplejson logger = logging.getLogger(__name__) +_json_encoder = simplejson.JSONEncoder(namedtuple_as_object=False) + class Command(object): """The base command class. @@ -107,7 +109,7 @@ class RdataCommand(Command): return " ".join(( self.stream_name, str(self.token) if self.token is not None else "batch", - simplejson.dumps(self.row, namedtuple_as_object=False), + _json_encoder.dumps(self.row), )) @@ -302,7 +304,7 @@ class InvalidateCacheCommand(Command): def to_line(self): return " ".join(( - self.cache_func, simplejson.dumps(self.keys, namedtuple_as_object=False) + self.cache_func, _json_encoder.encode(self.keys), )) @@ -334,7 +336,7 @@ class UserIpCommand(Command): ) def to_line(self): - return self.user_id + " " + simplejson.dumps(( + return self.user_id + " " + _json_encoder.encode(( self.access_token, self.ip, self.user_agent, self.device_id, self.last_seen, )) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index f3d65f4338..ece5e6c41f 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -14,15 +14,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from synapse.storage.events_worker import EventsWorkerStore +from collections import OrderedDict, deque, namedtuple +from functools import wraps +import logging +import simplejson as json from twisted.internet import defer -from synapse.events import USE_FROZEN_DICTS +from synapse.storage.events_worker import EventsWorkerStore from synapse.util.async import ObservableDeferred +from synapse.util.frozenutils import frozendict_json_encoder from synapse.util.logcontext import ( - PreserveLoggingContext, make_deferred_yieldable + PreserveLoggingContext, make_deferred_yieldable, ) from synapse.util.logutils import log_function from synapse.util.metrics import Measure @@ -30,16 +34,8 @@ from synapse.api.constants import EventTypes from synapse.api.errors import SynapseError from synapse.util.caches.descriptors import cached, cachedInlineCallbacks from synapse.types import get_domain_from_id - -from canonicaljson import encode_canonical_json -from collections import deque, namedtuple, OrderedDict -from functools import wraps - import synapse.metrics -import logging -import simplejson as json - # these are only included to make the type annotations work from synapse.events import EventBase # noqa: F401 from synapse.events.snapshot import EventContext # noqa: F401 @@ -71,10 +67,7 @@ state_delta_reuse_delta_counter = metrics.register_counter( def encode_json(json_object): - if USE_FROZEN_DICTS: - return encode_canonical_json(json_object) - else: - return json.dumps(json_object, ensure_ascii=False) + return frozendict_json_encoder.encode(json_object) class _EventPeristenceQueue(object): diff --git a/synapse/util/frozenutils.py b/synapse/util/frozenutils.py index 6322f0f55c..f497b51f4a 100644 --- a/synapse/util/frozenutils.py +++ b/synapse/util/frozenutils.py @@ -14,6 +14,7 @@ # limitations under the License. from frozendict import frozendict +import simplejson as json def freeze(o): @@ -49,3 +50,21 @@ def unfreeze(o): pass return o + + +def _handle_frozendict(obj): + """Helper for EventEncoder. Makes frozendicts serializable by returning + the underlying dict + """ + if type(obj) is frozendict: + # fishing the protected dict out of the object is a bit nasty, + # but we don't really want the overhead of copying the dict. + return obj._dict + raise TypeError('Object of type %s is not JSON serializable' % + obj.__class__.__name__) + + +# A JSONEncoder which is capable of encoding frozendics without barfing +frozendict_json_encoder = json.JSONEncoder( + default=_handle_frozendict, +) From 2fe3f848b92ee9493a724935167fad84678a7eb2 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 29 Mar 2018 23:05:33 +0100 Subject: [PATCH 46/96] Remove uses of events.content --- synapse/storage/room.py | 7 ++++--- synapse/storage/roommember.py | 6 ++++-- synapse/storage/search.py | 6 ++++-- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/synapse/storage/room.py b/synapse/storage/room.py index 908551d6d9..740c036975 100644 --- a/synapse/storage/room.py +++ b/synapse/storage/room.py @@ -594,7 +594,8 @@ class RoomStore(RoomWorkerStore, SearchStore): while next_token: sql = """ - SELECT stream_ordering, content FROM events + SELECT stream_ordering, json FROM events + JOIN event_json USING (event_id) WHERE room_id = ? AND stream_ordering < ? AND contains_url = ? AND outlier = ? 
@@ -606,8 +607,8 @@ class RoomStore(RoomWorkerStore, SearchStore): next_token = None for stream_ordering, content_json in txn: next_token = stream_ordering - content = json.loads(content_json) - + event_json = json.loads(content_json) + content = event_json["content"] content_url = content.get("url") thumbnail_url = content.get("info", {}).get("thumbnail_url") diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index d662d1cfc0..6a861943a2 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -645,8 +645,9 @@ class RoomMemberStore(RoomMemberWorkerStore): def add_membership_profile_txn(txn): sql = (""" - SELECT stream_ordering, event_id, events.room_id, content + SELECT stream_ordering, event_id, events.room_id, event_json.json FROM events + INNER JOIN event_json USING (event_id) INNER JOIN room_memberships USING (event_id) WHERE ? <= stream_ordering AND stream_ordering < ? AND type = 'm.room.member' @@ -667,7 +668,8 @@ class RoomMemberStore(RoomMemberWorkerStore): event_id = row["event_id"] room_id = row["room_id"] try: - content = json.loads(row["content"]) + event_json = json.loads(row["json"]) + content = event_json['content'] except Exception: continue diff --git a/synapse/storage/search.py b/synapse/storage/search.py index 984643b057..426cbe6e1a 100644 --- a/synapse/storage/search.py +++ b/synapse/storage/search.py @@ -75,8 +75,9 @@ class SearchStore(BackgroundUpdateStore): def reindex_search_txn(txn): sql = ( - "SELECT stream_ordering, event_id, room_id, type, content, " + "SELECT stream_ordering, event_id, room_id, type, json, " " origin_server_ts FROM events" + " JOIN event_json USING (event_id)" " WHERE ? <= stream_ordering AND stream_ordering < ?" " AND (%s)" " ORDER BY stream_ordering DESC" @@ -104,7 +105,8 @@ class SearchStore(BackgroundUpdateStore): stream_ordering = row["stream_ordering"] origin_server_ts = row["origin_server_ts"] try: - content = json.loads(row["content"]) + event_json = json.loads(row["json"]) + content = event_json["content"] except Exception: continue From 11597ddea5c43fdd2c6593b6bf4619a7bbdf3122 Mon Sep 17 00:00:00 2001 From: Adrian Tschira Date: Fri, 30 Mar 2018 23:59:02 +0200 Subject: [PATCH 47/96] improve mxid check performance ~4x Signed-off-by: Adrian Tschira --- synapse/types.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/synapse/types.py b/synapse/types.py index 7cb24cecb2..f1f41ccf90 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -12,11 +12,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import string from synapse.api.errors import SynapseError from collections import namedtuple +import re class Requester(namedtuple("Requester", [ @@ -214,7 +214,8 @@ class GroupID(DomainSpecificString): return group_id -mxid_localpart_allowed_characters = set("_-./=" + string.ascii_lowercase + string.digits) +# A regex that matches any valid mxid characters +MXID_LOCALPART_REGEX = re.compile("^[_\-./=a-z0-9]*$") def contains_invalid_mxid_characters(localpart): @@ -226,7 +227,7 @@ def contains_invalid_mxid_characters(localpart): Returns: bool: True if there are any naughty characters """ - return any(c not in mxid_localpart_allowed_characters for c in localpart) + return not MXID_LOCALPART_REGEX.match(localpart) class StreamToken( From 3ee4ad09eb9bcd0214da83b66214afa3ddb08116 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 3 Apr 2018 15:09:48 +0100 Subject: [PATCH 48/96] Fix json encoding bug in replication json encoders have an encode method, not a dumps method. --- synapse/replication/tcp/commands.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py index 34bcf903a3..12aac3cc6b 100644 --- a/synapse/replication/tcp/commands.py +++ b/synapse/replication/tcp/commands.py @@ -109,7 +109,7 @@ class RdataCommand(Command): return " ".join(( self.stream_name, str(self.token) if self.token is not None else "batch", - _json_encoder.dumps(self.row), + _json_encoder.encode(self.row), )) From eb8d8d6f57c7f6017548aa95409bb8cc346a5ae0 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Tue, 3 Apr 2018 15:40:43 +0100 Subject: [PATCH 49/96] Use join_policy API instead of joinable The API is now under /groups/$group_id/setting/m.join_policy and expects a JSON blob of the shape ```json { "m.join_policy": { "type": "invite" } } ``` where "invite" could alternatively be "open". 
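For illustration, with this change a client can switch a group to open joining
with a request of the following shape (the group ID and homeserver name here
are made up, and the endpoint is exposed under the usual client API prefixes,
e.g. `/_matrix/client/unstable`):

```
PUT /_matrix/client/unstable/groups/+example:example.com/setting/m.join_policy

{
    "m.join_policy": {
        "type": "open"
    }
}
```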
--- synapse/federation/transport/client.py | 4 +- synapse/federation/transport/server.py | 8 ++-- synapse/groups/groups_server.py | 41 ++++++++++++++++--- synapse/handlers/groups_local.py | 2 +- synapse/rest/client/v2_alpha/groups.py | 12 +++--- synapse/storage/group_server.py | 6 +-- .../schema/delta/48/groups_joinable.sql | 8 +++- 7 files changed, 58 insertions(+), 23 deletions(-) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 5a6b63350b..0f7f656824 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -860,9 +860,9 @@ class TransportLayerClient(object): @log_function def set_group_joinable(self, destination, group_id, requester_user_id, content): - """Sets whether a group is joinable without an invite or knock + """Sets the join policy for a group """ - path = PREFIX + "/groups/%s/joinable" % (group_id,) + path = PREFIX + "/groups/%s/setting/m.join_policy" % (group_id,) return self.client.post_json( destination=destination, diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 107deb4e1e..a52d3948f4 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -1125,10 +1125,10 @@ class FederationGroupsBulkPublicisedServlet(BaseFederationServlet): defer.returnValue((200, resp)) -class FederationGroupsJoinableServlet(BaseFederationServlet): +class FederationGroupsSettingJoinPolicyServlet(BaseFederationServlet): """Sets whether a group is joinable without an invite or knock """ - PATH = "/groups/(?P[^/]*)/joinable$" + PATH = "/groups/(?P[^/]*)/setting/m.join_policy$" @defer.inlineCallbacks def on_POST(self, origin, content, query, group_id): @@ -1136,7 +1136,7 @@ class FederationGroupsJoinableServlet(BaseFederationServlet): if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") - new_content = yield self.handler.set_group_joinable( + new_content = yield self.handler.set_group_join_policy( group_id, requester_user_id, content ) @@ -1191,7 +1191,7 @@ GROUP_SERVER_SERVLET_CLASSES = ( FederationGroupsSummaryUsersServlet, FederationGroupsAddRoomsServlet, FederationGroupsAddRoomsConfigServlet, - FederationGroupsJoinableServlet, + FederationGroupsSettingJoinPolicyServlet, ) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 25cbfb1691..70781e1854 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -207,20 +207,24 @@ class GroupsServerHandler(object): defer.returnValue({}) @defer.inlineCallbacks - def set_group_joinable(self, group_id, requester_user_id, content): - """Sets whether a group is joinable without an invite or knock + def set_group_join_policy(self, group_id, requester_user_id, content): + """Sets the group join policy. + + Currently supported policies are: + - "invite": an invite must be received and accepted in order to join. + - "open": anyone can join. 
""" yield self.check_group_is_ours( group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id ) - is_joinable = content.get('joinable') - if is_joinable is None: + join_policy = _parse_join_policy_from_contents(content) + if join_policy is None: raise SynapseError( - 400, "No value specified for 'joinable'" + 400, "No value specified for 'm.join_policy'" ) - yield self.store.set_group_joinable(group_id, is_joinable=is_joinable) + yield self.store.set_group_join_policy(group_id, join_policy=join_policy) defer.returnValue({}) @@ -854,6 +858,31 @@ class GroupsServerHandler(object): }) +def _parse_join_policy_from_contents(content): + """Given a content for a request, return the specified join policy or None + """ + + join_policy_dict = content.get("m.join_policy") + if join_policy_dict: + return _parse_join_policy_dict(join_policy_dict) + else: + return None + + +def _parse_join_policy_dict(join_policy_dict): + """Given a dict for the "m.join_policy" config return the join policy specified + """ + join_policy_type = join_policy_dict.get("type") + if not join_policy_type: + return True + + if join_policy_type not in ("invite", "open"): + raise SynapseError( + 400, "Synapse only supports 'invite'/'open' join rule" + ) + return join_policy_type + + def _parse_visibility_from_contents(content): """Given a content for a request parse out whether the entity should be public or not diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index c9671b9046..5f7b0ff305 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -91,7 +91,7 @@ class GroupsLocalHandler(object): get_group_role = _create_rerouter("get_group_role") get_group_roles = _create_rerouter("get_group_roles") - set_group_joinable = _create_rerouter("set_group_joinable") + set_group_join_policy = _create_rerouter("set_group_join_policy") @defer.inlineCallbacks def get_group_summary(self, group_id, requester_user_id): diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index aa94130e57..8faaa1d6a0 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -402,13 +402,13 @@ class GroupInvitedUsersServlet(RestServlet): defer.returnValue((200, result)) -class GroupJoinableServlet(RestServlet): - """Set whether a group is joinable without an invite +class GroupSettingJoinPolicyServlet(RestServlet): + """Set group join policy """ - PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/joinable$") + PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/setting/m.join_policy$") def __init__(self, hs): - super(GroupJoinableServlet, self).__init__() + super(GroupSettingJoinPolicyServlet, self).__init__() self.auth = hs.get_auth() self.groups_handler = hs.get_groups_local_handler() @@ -419,7 +419,7 @@ class GroupJoinableServlet(RestServlet): content = parse_json_object_from_request(request) - result = yield self.groups_handler.set_group_joinable( + result = yield self.groups_handler.set_group_join_policy( group_id, requester_user_id, content, @@ -765,7 +765,7 @@ def register_servlets(hs, http_server): GroupInvitedUsersServlet(hs).register(http_server) GroupUsersServlet(hs).register(http_server) GroupRoomServlet(hs).register(http_server) - GroupJoinableServlet(hs).register(http_server) + GroupSettingJoinPolicyServlet(hs).register(http_server) GroupCreateServlet(hs).register(http_server) GroupAdminRoomsServlet(hs).register(http_server) GroupAdminRoomsConfigServlet(hs).register(http_server) diff --git 
a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 96553d4fb1..db66ea1eb0 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -30,16 +30,16 @@ _DEFAULT_ROLE_ID = "" class GroupServerStore(SQLBaseStore): - def set_group_joinable(self, group_id, is_joinable): + def set_group_join_policy(self, group_id, join_policy): return self._simple_update_one( table="groups", keyvalues={ "group_id": group_id, }, updatevalues={ - "is_joinable": is_joinable, + "join_policy": join_policy, }, - desc="set_group_joinable", + desc="set_group_join_policy", ) def get_group(self, group_id): diff --git a/synapse/storage/schema/delta/48/groups_joinable.sql b/synapse/storage/schema/delta/48/groups_joinable.sql index ace7d0a723..ab3b00286d 100644 --- a/synapse/storage/schema/delta/48/groups_joinable.sql +++ b/synapse/storage/schema/delta/48/groups_joinable.sql @@ -13,4 +13,10 @@ * limitations under the License. */ -ALTER TABLE groups ADD COLUMN is_joinable SMALLINT DEFAULT 0 NOT NULL; +/* + * This isn't a real ENUM because sqlite doesn't support it + * and we use a default of NULL for inserted rows and interpret + * NULL at the python store level as necessary so that existing + * rows are given the correct default policy. + */ +ALTER TABLE groups ADD COLUMN join_policy TEXT DEFAULT NULL; From f92963f5db236c1afb2a489a44c9afdae7d61edc Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 4 Apr 2018 12:08:29 +0100 Subject: [PATCH 50/96] Revert "improve mxid check performance" --- synapse/types.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/synapse/types.py b/synapse/types.py index f1f41ccf90..7cb24cecb2 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -12,11 +12,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import string from synapse.api.errors import SynapseError from collections import namedtuple -import re class Requester(namedtuple("Requester", [ @@ -214,8 +214,7 @@ class GroupID(DomainSpecificString): return group_id -# A regex that matches any valid mxid characters -MXID_LOCALPART_REGEX = re.compile("^[_\-./=a-z0-9]*$") +mxid_localpart_allowed_characters = set("_-./=" + string.ascii_lowercase + string.digits) def contains_invalid_mxid_characters(localpart): @@ -227,7 +226,7 @@ def contains_invalid_mxid_characters(localpart): Returns: bool: True if there are any naughty characters """ - return not MXID_LOCALPART_REGEX.match(localpart) + return any(c not in mxid_localpart_allowed_characters for c in localpart) class StreamToken( From 301b339494f473fddd04cad9a9b107615e9dfa8d Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Wed, 4 Apr 2018 08:45:51 -0600 Subject: [PATCH 51/96] Move the mention of the main synapse worker higher up Signed-off-by: Travis Ralston --- docs/workers.rst | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/docs/workers.rst b/docs/workers.rst index a5e084c22a..bf8dd1ee48 100644 --- a/docs/workers.rst +++ b/docs/workers.rst @@ -55,7 +55,12 @@ synapse process.) You then create a set of configs for the various worker processes. These should be worker configuration files, and should be stored in a dedicated -subdirectory, to allow synctl to manipulate them. +subdirectory, to allow synctl to manipulate them. 
An additional configuration +for the master synapse process will need to be created because the process will +not be started automatically. That configuration should look like this:: + + worker_app: synapse.app.homeserver + daemonize: true Each worker configuration file inherits the configuration of the main homeserver configuration file. You can then override configuration specific to that worker, @@ -115,18 +120,6 @@ To manipulate a specific worker, you pass the -w option to synctl:: synctl -w $CONFIG/workers/synchrotron.yaml restart -After setting up your workers, you'll need to create a worker configuration for -the main synapse process. That worker configuration should look like this::: - - worker_app: synapse.app.homeserver - daemonize: true - -Be sure to keep this particular configuration limited as synapse may refuse to -start if the regular ``worker_*`` options are given. The ``homeserver.yaml`` -configuration will be used to set up the main synapse process. - -**You must have a worker configuration for the main synapse process!** - Available worker applications ----------------------------- From 204fc985204f0c24574ad2bf9fa9518d4fa7552d Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Wed, 4 Apr 2018 08:46:17 -0600 Subject: [PATCH 52/96] Document the additional routes for the event_creator worker Fixes https://github.com/matrix-org/synapse/issues/3018 Signed-off-by: Travis Ralston --- docs/workers.rst | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/workers.rst b/docs/workers.rst index bf8dd1ee48..c3868d6e41 100644 --- a/docs/workers.rst +++ b/docs/workers.rst @@ -235,9 +235,11 @@ file. For example:: ``synapse.app.event_creator`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Handles non-state event creation. It can handle REST endpoints matching: +Handles some event creation. It can handle REST endpoints matching: ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send + ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$ + ^/_matrix/client/(api/v1|r0|unstable)/join/ It will create events locally and then send them on to the main synapse instance to be persisted and handled. 
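For reference, a complete configuration file for an ``event_creator`` worker
in the layout described above might look like the following (the port, pid
file and log config path are illustrative, not prescribed)::

    worker_app: synapse.app.event_creator
    worker_listeners:
      - type: http
        port: 8009
        resources:
          - names:
              - client
    worker_daemonize: true
    worker_pid_file: /home/matrix/synapse/event_creator.pid
    worker_log_config: /home/matrix/synapse/config/event_creator_log_config.yaml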
From e4570c53dd35f00103e2353884d1dd446fc4c0f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Christian=20Gr=C3=BCnhage?= Date: Wed, 4 Apr 2018 16:46:58 +0100 Subject: [PATCH 53/96] phone home cache size configurations --- synapse/app/homeserver.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index b935beb974..464799ac90 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -48,6 +48,7 @@ from synapse.server import HomeServer from synapse.storage import are_all_users_on_domain from synapse.storage.engines import IncorrectDatabaseSetup, create_engine from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database +from synapse.util.caches import CACHE_SIZE_FACTOR from synapse.util.httpresourcetree import create_resource_tree from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole @@ -431,6 +432,8 @@ def run(hs): daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages() stats["daily_sent_messages"] = daily_sent_messages + stats["cache_factor"] = CACHE_SIZE_FACTOR + stats["event_cache_size"] = hs.config.event_cache_size if len(stats_process) > 0: stats["memory_rss"] = 0 From 518f6de0881378b1fa356e21256436491d43c93c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 4 Apr 2018 19:46:28 +0100 Subject: [PATCH 54/96] Remove redundant metrics which were deprecated in 0.27.0. --- CHANGES.rst | 9 +++++++++ UPGRADE.rst | 9 ++++++++- docs/metrics-howto.rst | 11 +++++++++++ synapse/http/server.py | 26 -------------------------- synapse/util/metrics.py | 25 ------------------------- 5 files changed, 28 insertions(+), 52 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 38372381ac..5fbad54427 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,12 @@ +Changes in synapse v0.28.0 (2018-xx-xx) +======================================= + +As previously advised, this release removes a number of redundant Prometheus +metrics. Administrators may need to update their dashboards and alerting rules +to use the updated metric names, if they have not already done so. See +`docs/metrics-howto.rst `_ +for more details. + Changes in synapse v0.27.2 (2018-03-26) ======================================= diff --git a/UPGRADE.rst b/UPGRADE.rst index f6bb1070b1..39a16b1c0c 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -52,7 +52,7 @@ Upgrading to $NEXT_VERSION ==================== This release expands the anonymous usage stats sent if the opt-in -``report_stats`` configuration is set to ``true``. We now capture RSS memory +``report_stats`` configuration is set to ``true``. We now capture RSS memory and cpu use at a very coarse level. This requires administrators to install the optional ``psutil`` python module. @@ -60,6 +60,13 @@ We would appreciate it if you could assist by ensuring this module is available and ``report_stats`` is enabled. This will let us see if performance changes to synapse are having an impact to the general community. +This release also removes a number of redundant Prometheus metrics. +Administrators may need to update their dashboards and alerting rules to use +the updated metric names, if they have not already done so. See +`docs/metrics-howto.rst `_ +for more details. 
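+For example, an alert that referenced the deprecated ``response_time:total``
+form of the response timer should now reference ``response_time_seconds``;
+the same renaming applies to the other block and response metrics listed in
+``docs/metrics-howto.rst``.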
+ + Upgrading to v0.15.0 ==================== diff --git a/docs/metrics-howto.rst b/docs/metrics-howto.rst index 8acc479bc3..5e2d7c52ec 100644 --- a/docs/metrics-howto.rst +++ b/docs/metrics-howto.rst @@ -34,6 +34,17 @@ How to monitor Synapse metrics using Prometheus Restart prometheus. +Deprecated metrics removed in 0.28.0 +------------------------------------ + +Synapse 0.28.0 removes all of the metrics deprecated by 0.27.0, which are those +listed under "Old name" below. This has been done to reduce the bandwidth used +by gathering metrics and the storage requirements for the Prometheus server, as +well as reducing CPU overhead for both Synapse and Prometheus. + +Administrators should update any alerts or monitoring dashboards to use the +"New name" listed below. + Block and response metrics renamed for 0.27.0 --------------------------------------------- diff --git a/synapse/http/server.py b/synapse/http/server.py index f19c068ef6..02c7e46f08 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -47,17 +47,6 @@ metrics = synapse.metrics.get_metrics_for(__name__) response_count = metrics.register_counter( "response_count", labels=["method", "servlet", "tag"], - alternative_names=( - # the following are all deprecated aliases for the same metric - metrics.name_prefix + x for x in ( - "_requests", - "_response_time:count", - "_response_ru_utime:count", - "_response_ru_stime:count", - "_response_db_txn_count:count", - "_response_db_txn_duration:count", - ) - ) ) requests_counter = metrics.register_counter( @@ -73,39 +62,24 @@ outgoing_responses_counter = metrics.register_counter( response_timer = metrics.register_counter( "response_time_seconds", labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_time:total", - ), ) response_ru_utime = metrics.register_counter( "response_ru_utime_seconds", labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_ru_utime:total", - ), ) response_ru_stime = metrics.register_counter( "response_ru_stime_seconds", labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_ru_stime:total", - ), ) response_db_txn_count = metrics.register_counter( "response_db_txn_count", labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_db_txn_count:total", - ), ) # seconds spent waiting for db txns, excluding scheduling time, when processing # this request response_db_txn_duration = metrics.register_counter( "response_db_txn_duration_seconds", labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_db_txn_duration:total", - ), ) # seconds spent waiting for a db connection, when processing this request diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index e4b5687a4b..c3d8237e8f 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -31,53 +31,28 @@ metrics = synapse.metrics.get_metrics_for(__name__) block_counter = metrics.register_counter( "block_count", labels=["block_name"], - alternative_names=( - # the following are all deprecated aliases for the same metric - metrics.name_prefix + x for x in ( - "_block_timer:count", - "_block_ru_utime:count", - "_block_ru_stime:count", - "_block_db_txn_count:count", - "_block_db_txn_duration:count", - ) - ) ) block_timer = metrics.register_counter( "block_time_seconds", labels=["block_name"], - alternative_names=( - metrics.name_prefix + "_block_timer:total", - ), ) block_ru_utime = 
metrics.register_counter(
    "block_ru_utime_seconds", labels=["block_name"],
-    alternative_names=(
-        metrics.name_prefix + "_block_ru_utime:total",
-    ),
 )
 
 block_ru_stime = metrics.register_counter(
     "block_ru_stime_seconds", labels=["block_name"],
-    alternative_names=(
-        metrics.name_prefix + "_block_ru_stime:total",
-    ),
 )
 
 block_db_txn_count = metrics.register_counter(
     "block_db_txn_count", labels=["block_name"],
-    alternative_names=(
-        metrics.name_prefix + "_block_db_txn_count:total",
-    ),
 )
 
 # seconds spent waiting for db txns, excluding scheduling time, in this block
 block_db_txn_duration = metrics.register_counter(
     "block_db_txn_duration_seconds", labels=["block_name"],
-    alternative_names=(
-        metrics.name_prefix + "_block_db_txn_duration:total",
-    ),
 )
 
 # seconds spent waiting for a db connection, in this block

From 0e5f479fc05ef9257c1bfce033c8fb91e6244ffe Mon Sep 17 00:00:00 2001
From: Neil Johnson
Date: Thu, 5 Apr 2018 12:16:46 +0100
Subject: [PATCH 55/96] Review comments

Use iteritems over items to loop over the dict; fix formatting

---
 synapse/app/homeserver.py   | 2 +-
 synapse/storage/__init__.py | 9 +++++----
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 35e2b00f1b..777e9c529a 100755
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -426,7 +426,7 @@ def run(hs):
         stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()
 
         r30_results = yield hs.get_datastore().count_r30_users()
-        for name, count in r30_results.items():
+        for name, count in r30_results.iteritems():
             stats["r30_users_" + name] = count
 
         daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py
index f68e436df0..4800584b59 100644
--- a/synapse/storage/__init__.py
+++ b/synapse/storage/__init__.py
@@ -280,8 +280,9 @@ class DataStore(RoomMemberStore, RoomStore,
 
             sql = """
                 SELECT platform, COALESCE(count(*), 0) FROM (
-                    SELECT users.name, platform, users.creation_ts * 1000,
-                    MAX(uip.last_seen)
+                    SELECT
+                    users.name, platform, users.creation_ts * 1000,
+                    MAX(uip.last_seen)
                     FROM users
                     INNER JOIN (
                         SELECT
@@ -310,8 +311,8 @@ class DataStore(RoomMemberStore, RoomStore,
             results = {}
             txn.execute(sql, (thirty_days_ago_in_secs,
                               thirty_days_ago_in_secs))
-            rows = txn.fetchall()
-            for row in rows:
+
+            for row in txn:
                 if row[0] == 'unknown':
                     continue
                 results[row[0]] = row[1]

From b214a04ffc1200535f1d9c6ec45717cd266f36e5 Mon Sep 17 00:00:00 2001
From: Luke Barnard
Date: Thu, 5 Apr 2018 13:29:16 +0100
Subject: [PATCH 56/96] Document set_group_join_policy

---
 synapse/storage/group_server.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py
index db66ea1eb0..ab4f710f7d 100644
--- a/synapse/storage/group_server.py
+++ b/synapse/storage/group_server.py
@@ -31,6 +31,12 @@ _DEFAULT_ROLE_ID = ""
 
 class GroupServerStore(SQLBaseStore):
     def set_group_join_policy(self, group_id, join_policy):
+        """Set the join policy of a group.
+ + join_policy can be one of: + * "invite" + * "open" + """ return self._simple_update_one( table="groups", keyvalues={ From 700e5e719875dd7008791f52828bb3cd92d6ce21 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Thu, 5 Apr 2018 14:01:17 +0100 Subject: [PATCH 57/96] Use DEFAULT join_policy of "invite" in db --- synapse/storage/schema/delta/48/groups_joinable.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/schema/delta/48/groups_joinable.sql b/synapse/storage/schema/delta/48/groups_joinable.sql index ab3b00286d..53add94367 100644 --- a/synapse/storage/schema/delta/48/groups_joinable.sql +++ b/synapse/storage/schema/delta/48/groups_joinable.sql @@ -19,4 +19,4 @@ * NULL at the python store level as necessary so that existing * rows are given the correct default policy. */ -ALTER TABLE groups ADD COLUMN join_policy TEXT DEFAULT NULL; +ALTER TABLE groups ADD COLUMN join_policy TEXT NON NULL DEFAULT 'invite'; From 104c0bc1d5d1f2a487c50d63b22caa477b091976 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Thu, 5 Apr 2018 14:07:16 +0100 Subject: [PATCH 58/96] Use "/settings/" (plural) --- synapse/federation/transport/client.py | 2 +- synapse/federation/transport/server.py | 2 +- synapse/rest/client/v2_alpha/groups.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 0f7f656824..1fe162d55b 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -862,7 +862,7 @@ class TransportLayerClient(object): content): """Sets the join policy for a group """ - path = PREFIX + "/groups/%s/setting/m.join_policy" % (group_id,) + path = PREFIX + "/groups/%s/settings/m.join_policy" % (group_id,) return self.client.post_json( destination=destination, diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index a52d3948f4..3658ca75f3 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -1128,7 +1128,7 @@ class FederationGroupsBulkPublicisedServlet(BaseFederationServlet): class FederationGroupsSettingJoinPolicyServlet(BaseFederationServlet): """Sets whether a group is joinable without an invite or knock """ - PATH = "/groups/(?P[^/]*)/setting/m.join_policy$" + PATH = "/groups/(?P[^/]*)/settings/m.join_policy$" @defer.inlineCallbacks def on_POST(self, origin, content, query, group_id): diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index 8faaa1d6a0..3bb1ec2af6 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -405,7 +405,7 @@ class GroupInvitedUsersServlet(RestServlet): class GroupSettingJoinPolicyServlet(RestServlet): """Set group join policy """ - PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/setting/m.join_policy$") + PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/settings/m.join_policy$") def __init__(self, hs): super(GroupSettingJoinPolicyServlet, self).__init__() From 917380e89d2d323be1a6ea03e53a31ed335c80df Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Thu, 5 Apr 2018 14:32:12 +0100 Subject: [PATCH 59/96] NON NULL -> NOT NULL --- synapse/storage/schema/delta/48/groups_joinable.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/schema/delta/48/groups_joinable.sql b/synapse/storage/schema/delta/48/groups_joinable.sql index 53add94367..ce26eaf0c9 100644 --- 
a/synapse/storage/schema/delta/48/groups_joinable.sql +++ b/synapse/storage/schema/delta/48/groups_joinable.sql @@ -19,4 +19,4 @@ * NULL at the python store level as necessary so that existing * rows are given the correct default policy. */ -ALTER TABLE groups ADD COLUMN join_policy TEXT NON NULL DEFAULT 'invite'; +ALTER TABLE groups ADD COLUMN join_policy TEXT NOT NULL DEFAULT 'invite'; From 01afc563c39006c21bb7752831cd62c146edc135 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 5 Apr 2018 16:24:04 +0100 Subject: [PATCH 60/96] Fix overzealous cache invalidation Fixes an issue where a cache invalidation would invalidate *all* pending entries, rather than just the entry that we intended to invalidate. --- synapse/util/caches/descriptors.py | 64 ++++++++++++++++----------- tests/util/caches/test_descriptors.py | 46 +++++++++++++++++++ 2 files changed, 84 insertions(+), 26 deletions(-) diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index bf3a66eae4..68285a7594 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -39,12 +40,11 @@ _CacheSentinel = object() class CacheEntry(object): __slots__ = [ - "deferred", "sequence", "callbacks", "invalidated" + "deferred", "callbacks", "invalidated" ] - def __init__(self, deferred, sequence, callbacks): + def __init__(self, deferred, callbacks): self.deferred = deferred - self.sequence = sequence self.callbacks = set(callbacks) self.invalidated = False @@ -62,7 +62,6 @@ class Cache(object): "max_entries", "name", "keylen", - "sequence", "thread", "metrics", "_pending_deferred_cache", @@ -80,7 +79,6 @@ class Cache(object): self.name = name self.keylen = keylen - self.sequence = 0 self.thread = None self.metrics = register_cache(name, self.cache) @@ -113,11 +111,10 @@ class Cache(object): callbacks = [callback] if callback else [] val = self._pending_deferred_cache.get(key, _CacheSentinel) if val is not _CacheSentinel: - if val.sequence == self.sequence: - val.callbacks.update(callbacks) - if update_metrics: - self.metrics.inc_hits() - return val.deferred + val.callbacks.update(callbacks) + if update_metrics: + self.metrics.inc_hits() + return val.deferred val = self.cache.get(key, _CacheSentinel, callbacks=callbacks) if val is not _CacheSentinel: @@ -137,12 +134,9 @@ class Cache(object): self.check_thread() entry = CacheEntry( deferred=value, - sequence=self.sequence, callbacks=callbacks, ) - entry.callbacks.update(callbacks) - existing_entry = self._pending_deferred_cache.pop(key, None) if existing_entry: existing_entry.invalidate() @@ -150,13 +144,25 @@ class Cache(object): self._pending_deferred_cache[key] = entry def shuffle(result): - if self.sequence == entry.sequence: - existing_entry = self._pending_deferred_cache.pop(key, None) - if existing_entry is entry: - self.cache.set(key, result, entry.callbacks) - else: - entry.invalidate() + existing_entry = self._pending_deferred_cache.pop(key, None) + if existing_entry is entry: + self.cache.set(key, result, entry.callbacks) else: + # oops, the _pending_deferred_cache has been updated since + # we started our query, so we are out of date. + # + # Better put back whatever we took out. 
(We do it this way + # round, rather than peeking into the _pending_deferred_cache + # and then removing on a match, to make the common case faster) + if existing_entry is not None: + self._pending_deferred_cache[key] = existing_entry + + # we're not going to put this entry into the cache, so need + # to make sure that the invalidation callbacks are called. + # That was probably done when _pending_deferred_cache was + # updated, but it's possible that `set` was called without + # `invalidate` being previously called, in which case it may + # not have been. Either way, let's double-check now. entry.invalidate() return result @@ -168,25 +174,29 @@ class Cache(object): def invalidate(self, key): self.check_thread() + self.cache.pop(key, None) - # Increment the sequence number so that any SELECT statements that - # raced with the INSERT don't update the cache (SYN-369) - self.sequence += 1 + # if we have a pending lookup for this key, remove it from the + # _pending_deferred_cache, which will (a) stop it being returned + # for future queries and (b) stop it being persisted as a proper entry + # in self.cache. entry = self._pending_deferred_cache.pop(key, None) + + # run the invalidation callbacks now, rather than waiting for the + # deferred to resolve. if entry: entry.invalidate() - self.cache.pop(key, None) - def invalidate_many(self, key): self.check_thread() if not isinstance(key, tuple): raise TypeError( "The cache key must be a tuple not %r" % (type(key),) ) - self.sequence += 1 self.cache.del_multi(key) + # if we have a pending lookup for this key, remove it from the + # _pending_deferred_cache, as above entry_dict = self._pending_deferred_cache.pop(key, None) if entry_dict is not None: for entry in iterate_tree_cache_entry(entry_dict): @@ -194,8 +204,10 @@ class Cache(object): def invalidate_all(self): self.check_thread() - self.sequence += 1 self.cache.clear() + for entry in self._pending_deferred_cache.itervalues(): + entry.invalidate() + self._pending_deferred_cache.clear() class _CacheDescriptorBase(object): diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 3f14ab503f..2516fe40f4 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,6 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
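To make the new semantics concrete before the test below, here is a minimal standalone sketch of the per-entry scheme. It is not Synapse's real ``Cache`` (``MiniCache`` and ``PendingEntry`` are invented for illustration), but it shows why invalidating one key can no longer discard unrelated in-flight lookups: each pending entry carries its own callbacks, and a result is only promoted to the completed cache if its entry is still current::

    from twisted.internet import defer

    class PendingEntry(object):
        """A not-yet-completed lookup, with its own invalidation callbacks."""
        def __init__(self, deferred, callbacks):
            self.deferred = deferred
            self.callbacks = set(callbacks)

        def invalidate(self):
            for callback in self.callbacks:
                callback()
            self.callbacks.clear()

    class MiniCache(object):
        def __init__(self):
            self._pending = {}    # key -> PendingEntry
            self._completed = {}  # key -> result

        def set(self, key, deferred, callback):
            entry = PendingEntry(deferred, [callback])
            old = self._pending.pop(key, None)
            if old is not None:
                old.invalidate()
            self._pending[key] = entry

            def shuffle(result):
                # promote to the completed cache only if we are still
                # the current entry for this key
                if self._pending.get(key) is entry:
                    del self._pending[key]
                    self._completed[key] = result
                else:
                    entry.invalidate()
                return result

            deferred.addCallback(shuffle)

        def invalidate(self, key):
            self._completed.pop(key, None)
            entry = self._pending.pop(key, None)
            if entry is not None:
                entry.invalidate()  # fires this entry's callbacks, nobody else's

    if __name__ == "__main__":
        cache = MiniCache()
        d = defer.Deferred()
        cache.set("k", d, lambda: None)
        cache.invalidate("k")   # fires only "k"'s callback
        d.callback("result")    # stale result is not promoted to the cache
        assert cache._completed == {}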
+from functools import partial import logging import mock @@ -25,6 +27,50 @@ from tests import unittest logger = logging.getLogger(__name__) +class CacheTestCase(unittest.TestCase): + def test_invalidate_all(self): + cache = descriptors.Cache("testcache") + + callback_record = [False, False] + + def record_callback(idx): + callback_record[idx] = True + + # add a couple of pending entries + d1 = defer.Deferred() + cache.set("key1", d1, partial(record_callback, 0)) + + d2 = defer.Deferred() + cache.set("key2", d2, partial(record_callback, 1)) + + # lookup should return the deferreds + self.assertIs(cache.get("key1"), d1) + self.assertIs(cache.get("key2"), d2) + + # let one of the lookups complete + d2.callback("result2") + self.assertEqual(cache.get("key2"), "result2") + + # now do the invalidation + cache.invalidate_all() + + # lookup should return none + self.assertIsNone(cache.get("key1", None)) + self.assertIsNone(cache.get("key2", None)) + + # both callbacks should have been callbacked + self.assertTrue( + callback_record[0], "Invalidation callback for key1 not called", + ) + self.assertTrue( + callback_record[1], "Invalidation callback for key2 not called", + ) + + # letting the other lookup complete should do nothing + d1.callback("result1") + self.assertIsNone(cache.get("key1", None)) + + class DescriptorTestCase(unittest.TestCase): @defer.inlineCallbacks def test_cache(self): From 15e8ed874ff13f30b79e6a47cf814b5a2a7dfc9a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 6 Apr 2018 09:28:36 +0100 Subject: [PATCH 61/96] more verbosity in synctl --- synapse/app/synctl.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/app/synctl.py b/synapse/app/synctl.py index b0e1b5e66a..712dfa870e 100755 --- a/synapse/app/synctl.py +++ b/synapse/app/synctl.py @@ -252,6 +252,7 @@ def main(): for running_pid in running_pids: while pid_running(running_pid): time.sleep(0.2) + write("All processes exited; now restarting...") if action == "start" or action == "restart": if start_stop_synapse: From 1d71f484d4ec00fd41e3ef195622d0d5dba6d372 Mon Sep 17 00:00:00 2001 From: Krombel Date: Fri, 6 Apr 2018 12:54:09 +0200 Subject: [PATCH 62/96] use PUT instead of POST for federating groups/m.join_policy --- synapse/federation/transport/client.py | 2 +- synapse/federation/transport/server.py | 2 +- synapse/http/matrixfederationclient.py | 6 +++++- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 1fe162d55b..3beab47832 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -864,7 +864,7 @@ class TransportLayerClient(object): """ path = PREFIX + "/groups/%s/settings/m.join_policy" % (group_id,) - return self.client.post_json( + return self.client.put_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 3658ca75f3..b98e30459c 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -1131,7 +1131,7 @@ class FederationGroupsSettingJoinPolicyServlet(BaseFederationServlet): PATH = "/groups/(?P[^/]*)/settings/m.join_policy$" @defer.inlineCallbacks - def on_POST(self, origin, content, query, group_id): + def on_PUT(self, origin, content, query, group_id): requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise 
SynapseError(403, "requester_user_id doesn't match origin") diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 9145405cb0..60a29081e8 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -286,7 +286,8 @@ class MatrixFederationHttpClient(object): headers_dict[b"Authorization"] = auth_headers @defer.inlineCallbacks - def put_json(self, destination, path, data={}, json_data_callback=None, + def put_json(self, destination, path, args={}, data={}, + json_data_callback=None, long_retries=False, timeout=None, ignore_backoff=False, backoff_on_404=False): @@ -296,6 +297,7 @@ class MatrixFederationHttpClient(object): destination (str): The remote server to send the HTTP request to. path (str): The HTTP path. + args (dict): query params data (dict): A dict containing the data that will be used as the request body. This will be encoded as JSON. json_data_callback (callable): A callable returning the dict to @@ -342,6 +344,7 @@ class MatrixFederationHttpClient(object): path, body_callback=body_callback, headers_dict={"Content-Type": ["application/json"]}, + query_bytes=encode_query_args(args), long_retries=long_retries, timeout=timeout, ignore_backoff=ignore_backoff, @@ -373,6 +376,7 @@ class MatrixFederationHttpClient(object): giving up. None indicates no timeout. ignore_backoff (bool): true to ignore the historical backoff data and try the request anyway. + args (dict): query params Returns: Deferred: Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body. From 7b824f147590c6519ca8889e6d406928de7f1e6e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 6 Apr 2018 13:20:05 +0100 Subject: [PATCH 63/96] Add response size metrics --- synapse/http/server.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/synapse/http/server.py b/synapse/http/server.py index 02c7e46f08..ac75206ef5 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -87,6 +87,11 @@ response_db_sched_duration = metrics.register_counter( "response_db_sched_duration_seconds", labels=["method", "servlet", "tag"] ) +# size in bytes of the response written +response_size = metrics.register_counter( + "response_size", labels=["method", "servlet", "tag"] +) + _next_request_id = 0 @@ -400,6 +405,8 @@ class RequestMetrics(object): context.db_sched_duration_ms / 1000., request.method, self.name, tag ) + response_size.inc_by(request.sentLength, request.method, self.name, tag) + class RootRedirect(resource.Resource): """Redirects the root '/' path to another path.""" From e01ba5bda315ed80f2a809e2f2e917eaf7488fac Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 6 Apr 2018 13:45:10 +0100 Subject: [PATCH 64/96] Port script: avoid nasty errors when setting up We really shouldn't spit out "Failed to create port table", it looks scary. --- scripts/synapse_port_db | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index d46581e4e1..7fb8be3abd 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -1,6 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
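The change below swaps the port-table creation to ``CREATE TABLE IF NOT EXISTS``, so re-running the script no longer has to catch and log an "already exists" error. A minimal sketch of the idempotent-DDL pattern (using an in-memory SQLite database purely for illustration)::

    import sqlite3

    conn = sqlite3.connect(":memory:")
    ddl = (
        "CREATE TABLE IF NOT EXISTS port_from_sqlite3 ("
        " table_name varchar(100) NOT NULL UNIQUE,"
        " forward_rowid bigint NOT NULL,"
        " backward_rowid bigint NOT NULL"
        ")"
    )
    conn.execute(ddl)
    conn.execute(ddl)  # second run is a silent no-op instead of an error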
@@ -491,7 +492,7 @@ class Porter(object): def create_port_table(txn): txn.execute( - "CREATE TABLE port_from_sqlite3 (" + "CREATE TABLE IF NOT EXISTS port_from_sqlite3 (" " table_name varchar(100) NOT NULL UNIQUE," " forward_rowid bigint NOT NULL," " backward_rowid bigint NOT NULL" @@ -517,14 +518,11 @@ class Porter(object): "alter_table", alter_table ) except Exception as e: - logger.info("Failed to create port table: %s", e) + pass - try: - yield self.postgres_store.runInteraction( - "create_port_table", create_port_table - ) - except Exception as e: - logger.info("Failed to create port table: %s", e) + yield self.postgres_store.runInteraction( + "create_port_table", create_port_table + ) self.progress.set_state("Setting up") From 01579384ccd26d112df5169522fd16234e8316c7 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 6 Apr 2018 13:47:11 +0100 Subject: [PATCH 65/96] Port script: clean up a bit Improve logging and comments. Group all the stuff to do with inspecting tables together rather than creating the port tables in the middle. --- scripts/synapse_port_db | 54 ++++++++++++++++++++++------------------- 1 file changed, 29 insertions(+), 25 deletions(-) diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index 7fb8be3abd..dc1a10bf28 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -251,6 +251,12 @@ class Porter(object): @defer.inlineCallbacks def handle_table(self, table, postgres_size, table_size, forward_chunk, backward_chunk): + logger.info( + "Table %s: %i/%i (rows %i-%i) already ported", + table, postgres_size, table_size, + backward_chunk+1, forward_chunk-1, + ) + if not table_size: return @@ -468,28 +474,7 @@ class Porter(object): self.progress.set_state("Preparing PostgreSQL") self.setup_db(postgres_config, postgres_engine) - # Step 2. Get tables. - self.progress.set_state("Fetching tables") - sqlite_tables = yield self.sqlite_store._simple_select_onecol( - table="sqlite_master", - keyvalues={ - "type": "table", - }, - retcol="name", - ) - - postgres_tables = yield self.postgres_store._simple_select_onecol( - table="information_schema.tables", - keyvalues={}, - retcol="distinct table_name", - ) - - tables = set(sqlite_tables) & set(postgres_tables) - - self.progress.set_state("Creating tables") - - logger.info("Found %d tables", len(tables)) - + self.progress.set_state("Creating port tables") def create_port_table(txn): txn.execute( "CREATE TABLE IF NOT EXISTS port_from_sqlite3 (" @@ -524,9 +509,27 @@ class Porter(object): "create_port_table", create_port_table ) - self.progress.set_state("Setting up") + # Step 2. Get tables. + self.progress.set_state("Fetching tables") + sqlite_tables = yield self.sqlite_store._simple_select_onecol( + table="sqlite_master", + keyvalues={ + "type": "table", + }, + retcol="name", + ) - # Set up tables. + postgres_tables = yield self.postgres_store._simple_select_onecol( + table="information_schema.tables", + keyvalues={}, + retcol="distinct table_name", + ) + + tables = set(sqlite_tables) & set(postgres_tables) + logger.info("Found %d tables", len(tables)) + + # Step 3. Figure out what still needs copying + self.progress.set_state("Checking on port progress") setup_res = yield defer.gatherResults( [ self.setup_table(table) @@ -537,7 +540,8 @@ class Porter(object): consumeErrors=True, ) - # Process tables. + # Step 4. Do the copying. 
+ self.progress.set_state("Copying to postgres") yield defer.gatherResults( [ self.handle_table(*res) From 6a9777ba028af1d9803d13e879eaf62773c5bd83 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 6 Apr 2018 13:48:40 +0100 Subject: [PATCH 66/96] Port script: Set up state_group_id_seq Fixes https://github.com/matrix-org/synapse/issues/3050. --- scripts/synapse_port_db | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index dc1a10bf28..7b23a44854 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -550,6 +550,9 @@ class Porter(object): consumeErrors=True, ) + # Step 5. Do final post-processing + yield self._setup_state_group_id_seq() + self.progress.done() except: global end_error_exec_info @@ -709,6 +712,16 @@ class Porter(object): defer.returnValue((done, remaining + done)) + def _setup_state_group_id_seq(self): + def r(txn): + txn.execute("SELECT MAX(id) FROM state_groups") + next_id = txn.fetchone()[0]+1 + txn.execute( + "ALTER SEQUENCE state_group_id_seq RESTART WITH %s", + (next_id,), + ) + return self.postgres_store.runInteraction("setup_state_group_id_seq", r) + ############################################## ###### The following is simply UI stuff ###### From b370fe61c0aeccac7745ad404dad925ec217ba6d Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 28 Mar 2018 17:18:02 +0100 Subject: [PATCH 67/96] Implement group join API --- synapse/federation/transport/client.py | 13 ++++++++ synapse/federation/transport/server.py | 18 +++++++++++ synapse/groups/groups_server.py | 45 ++++++++++++++++++++++++++ synapse/handlers/groups_local.py | 40 ++++++++++++++++++++++- synapse/storage/group_server.py | 12 +++++-- 5 files changed, 124 insertions(+), 4 deletions(-) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 3beab47832..370f7ba78b 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -614,6 +614,19 @@ class TransportLayerClient(object): ignore_backoff=True, ) + @log_function + def join_group(self, destination, group_id, user_id, content): + """Attempts to join a group + """ + path = PREFIX + "/groups/%s/users/%s/join" % (group_id, user_id) + + return self.client.post_json( + destination=destination, + path=path, + data=content, + ignore_backoff=True, + ) + @log_function def invite_to_group(self, destination, group_id, user_id, requester_user_id, content): """Invite a user to a group diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index b98e30459c..4c94d5a36c 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -803,6 +803,23 @@ class FederationGroupsAcceptInviteServlet(BaseFederationServlet): defer.returnValue((200, new_content)) +class FederationGroupsJoinServlet(BaseFederationServlet): + """Attempt to join a group + """ + PATH = "/groups/(?P[^/]*)/users/(?P[^/]*)/join$" + + @defer.inlineCallbacks + def on_POST(self, origin, content, query, group_id, user_id): + if get_domain_from_id(user_id) != origin: + raise SynapseError(403, "user_id doesn't match origin") + + new_content = yield self.handler.join_group( + group_id, user_id, content, + ) + + defer.returnValue((200, new_content)) + + class FederationGroupsRemoveUserServlet(BaseFederationServlet): """Leave or kick a user from the group """ @@ -1182,6 +1199,7 @@ GROUP_SERVER_SERVLET_CLASSES = ( FederationGroupsInvitedUsersServlet, 
FederationGroupsInviteServlet, FederationGroupsAcceptInviteServlet, + FederationGroupsJoinServlet, FederationGroupsRemoveUserServlet, FederationGroupsSummaryRoomsServlet, FederationGroupsCategoriesServlet, diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 70781e1854..77b6273e39 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -723,6 +723,51 @@ class GroupsServerHandler(object): "attestation": local_attestation, }) + @defer.inlineCallbacks + def join_group(self, group_id, requester_user_id, content): + """User tries to join the group. + + This will error if the group requires an invite/knock to join + """ + + yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True) + + group_info = yield self.store.get_group( + group_id, + ) + if not group_info['is_joinable']: + raise SynapseError(403, "Group is not publicly joinable") + + if not self.hs.is_mine_id(requester_user_id): + local_attestation = self.attestations.create_attestation( + group_id, requester_user_id, + ) + remote_attestation = content["attestation"] + + yield self.attestations.verify_attestation( + remote_attestation, + user_id=requester_user_id, + group_id=group_id, + ) + else: + local_attestation = None + remote_attestation = None + + is_public = _parse_visibility_from_contents(content) + + yield self.store.add_user_to_group( + group_id, requester_user_id, + is_admin=False, + is_public=is_public, + local_attestation=local_attestation, + remote_attestation=remote_attestation, + ) + + defer.returnValue({ + "state": "join", + "attestation": local_attestation, + }) + @defer.inlineCallbacks def knock(self, group_id, requester_user_id, content): """A user requests becoming a member of the group diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 5f7b0ff305..977993e7d4 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -229,7 +229,45 @@ class GroupsLocalHandler(object): def join_group(self, group_id, user_id, content): """Request to join a group """ - raise NotImplementedError() # TODO + if self.is_mine_id(group_id): + yield self.groups_server_handler.join_group( + group_id, user_id, content + ) + local_attestation = None + remote_attestation = None + else: + local_attestation = self.attestations.create_attestation(group_id, user_id) + content["attestation"] = local_attestation + + res = yield self.transport_client.join_group( + get_domain_from_id(group_id), group_id, user_id, content, + ) + + remote_attestation = res["attestation"] + + yield self.attestations.verify_attestation( + remote_attestation, + group_id=group_id, + user_id=user_id, + server_name=get_domain_from_id(group_id), + ) + + # TODO: Check that the group is public and we're being added publically + is_publicised = content.get("publicise", False) + + token = yield self.store.register_user_group_membership( + group_id, user_id, + membership="join", + is_admin=False, + local_attestation=local_attestation, + remote_attestation=remote_attestation, + is_publicised=is_publicised, + ) + self.notifier.on_new_event( + "groups_key", token, users=[user_id], + ) + + defer.returnValue({}) @defer.inlineCallbacks def accept_invite(self, group_id, user_id, content): diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index db316a27ec..16c11a056f 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -48,19 +48,25 @@ class GroupServerStore(SQLBaseStore): 
desc="set_group_join_policy", ) + @defer.inlineCallbacks def get_group(self, group_id): - return self._simple_select_one( + ret = yield self._simple_select_one( table="groups", keyvalues={ "group_id": group_id, }, retcols=( - "name", "short_description", "long_description", "avatar_url", "is_public" + "name", "short_description", "long_description", "avatar_url", "is_public", "is_joinable", ), allow_none=True, - desc="is_user_in_group", + desc="get_group", ) + if ret and 'is_joinable' in ret: + ret['is_joinable'] = bool(ret['is_joinable']) + + defer.returnValue(ret) + def get_users_in_group(self, group_id, include_private=False): # TODO: Pagination From edb45aae38b7bdeae1cbf18b435be5f8610c2d48 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 28 Mar 2018 17:18:50 +0100 Subject: [PATCH 68/96] pep8 --- synapse/storage/group_server.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 16c11a056f..5fbe0ada4e 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -56,7 +56,8 @@ class GroupServerStore(SQLBaseStore): "group_id": group_id, }, retcols=( - "name", "short_description", "long_description", "avatar_url", "is_public", "is_joinable", + "name", "short_description", "long_description", + "avatar_url", "is_public", "is_joinable", ), allow_none=True, desc="get_group", From 6eb3aa94b6befcc89f6fee426053b3feee316bb9 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Tue, 3 Apr 2018 11:48:17 +0100 Subject: [PATCH 69/96] Factor out add_user from accept_invite and join_group --- synapse/groups/groups_server.py | 84 ++++++++++++++------------------- 1 file changed, 36 insertions(+), 48 deletions(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 77b6273e39..2c02da4725 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -677,6 +677,40 @@ class GroupsServerHandler(object): else: raise SynapseError(502, "Unknown state returned by HS") + @defer.inlineCallbacks + def add_user(self, group_id, user_id, content): + """Add a user to a group based on a content dict. + + See accept_invite, join_group. + """ + if not self.hs.is_mine_id(user_id): + local_attestation = self.attestations.create_attestation( + group_id, user_id, + ) + + remote_attestation = content["attestation"] + + yield self.attestations.verify_attestation( + remote_attestation, + user_id=user_id, + group_id=group_id, + ) + else: + local_attestation = None + remote_attestation = None + + is_public = _parse_visibility_from_contents(content) + + yield self.store.add_user_to_group( + group_id, user_id, + is_admin=False, + is_public=is_public, + local_attestation=local_attestation, + remote_attestation=remote_attestation, + ) + + defer.returnValue(local_attestation) + @defer.inlineCallbacks def accept_invite(self, group_id, requester_user_id, content): """User tries to accept an invite to the group. 
@@ -693,30 +727,7 @@ class GroupsServerHandler(object): if not is_invited: raise SynapseError(403, "User not invited to group") - if not self.hs.is_mine_id(requester_user_id): - local_attestation = self.attestations.create_attestation( - group_id, requester_user_id, - ) - remote_attestation = content["attestation"] - - yield self.attestations.verify_attestation( - remote_attestation, - user_id=requester_user_id, - group_id=group_id, - ) - else: - local_attestation = None - remote_attestation = None - - is_public = _parse_visibility_from_contents(content) - - yield self.store.add_user_to_group( - group_id, requester_user_id, - is_admin=False, - is_public=is_public, - local_attestation=local_attestation, - remote_attestation=remote_attestation, - ) + local_attestation = yield self.add_user(group_id, requester_user_id, content) defer.returnValue({ "state": "join", @@ -738,30 +749,7 @@ class GroupsServerHandler(object): if not group_info['is_joinable']: raise SynapseError(403, "Group is not publicly joinable") - if not self.hs.is_mine_id(requester_user_id): - local_attestation = self.attestations.create_attestation( - group_id, requester_user_id, - ) - remote_attestation = content["attestation"] - - yield self.attestations.verify_attestation( - remote_attestation, - user_id=requester_user_id, - group_id=group_id, - ) - else: - local_attestation = None - remote_attestation = None - - is_public = _parse_visibility_from_contents(content) - - yield self.store.add_user_to_group( - group_id, requester_user_id, - is_admin=False, - is_public=is_public, - local_attestation=local_attestation, - remote_attestation=remote_attestation, - ) + local_attestation = yield self.add_user(group_id, requester_user_id, content) defer.returnValue({ "state": "join", From f8d1917fce3b08b613f76131d6a82a7576379251 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Thu, 5 Apr 2018 16:31:24 +0100 Subject: [PATCH 70/96] Fix federation client `set_group_joinable` typo --- synapse/federation/transport/client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 370f7ba78b..67ff7da51b 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -871,7 +871,7 @@ class TransportLayerClient(object): ) @log_function - def set_group_joinable(self, destination, group_id, requester_user_id, + def set_group_join_policy(self, destination, group_id, requester_user_id, content): """Sets the join policy for a group """ From ae85c7804e733aa1adaed06a9de51445a084858e Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Thu, 5 Apr 2018 16:31:57 +0100 Subject: [PATCH 71/96] is_joinable -> join_rule --- synapse/groups/groups_server.py | 2 +- synapse/storage/group_server.py | 5 +---- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 2c02da4725..507ec232c5 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -746,7 +746,7 @@ class GroupsServerHandler(object): group_info = yield self.store.get_group( group_id, ) - if not group_info['is_joinable']: + if group_info['join_policy'] != "open": raise SynapseError(403, "Group is not publicly joinable") local_attestation = yield self.add_user(group_id, requester_user_id, content) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 5fbe0ada4e..d81609dd16 100644 --- a/synapse/storage/group_server.py +++ 
b/synapse/storage/group_server.py @@ -57,15 +57,12 @@ class GroupServerStore(SQLBaseStore): }, retcols=( "name", "short_description", "long_description", - "avatar_url", "is_public", "is_joinable", + "avatar_url", "is_public", "join_rule", ), allow_none=True, desc="get_group", ) - if ret and 'is_joinable' in ret: - ret['is_joinable'] = bool(ret['is_joinable']) - defer.returnValue(ret) def get_users_in_group(self, group_id, include_private=False): From 87c864b6984f6876f224e0d4ac2e2b872d2723ce Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Thu, 5 Apr 2018 16:49:22 +0100 Subject: [PATCH 72/96] join_rule -> join_policy --- synapse/storage/group_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index d81609dd16..8bd8ca8124 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -57,7 +57,7 @@ class GroupServerStore(SQLBaseStore): }, retcols=( "name", "short_description", "long_description", - "avatar_url", "is_public", "join_rule", + "avatar_url", "is_public", "join_policy", ), allow_none=True, desc="get_group", From cd087a265db8037ce2fe158ddc0e9b35a612d4bb Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Thu, 5 Apr 2018 17:14:27 +0100 Subject: [PATCH 73/96] Don't use redundant inlineCallbacks --- synapse/storage/group_server.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 8bd8ca8124..da05ccb027 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -48,9 +48,8 @@ class GroupServerStore(SQLBaseStore): desc="set_group_join_policy", ) - @defer.inlineCallbacks def get_group(self, group_id): - ret = yield self._simple_select_one( + return self._simple_select_one( table="groups", keyvalues={ "group_id": group_id, @@ -63,8 +62,6 @@ class GroupServerStore(SQLBaseStore): desc="get_group", ) - defer.returnValue(ret) - def get_users_in_group(self, group_id, include_private=False): # TODO: Pagination From 6850f8aea3482443dff1c330fb63f7ca8dde0025 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Thu, 5 Apr 2018 17:16:31 +0100 Subject: [PATCH 74/96] Get group_info from existing call to check_group_is_ours --- synapse/groups/groups_server.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 507ec232c5..1b516011d8 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -741,11 +741,7 @@ class GroupsServerHandler(object): This will error if the group requires an invite/knock to join """ - yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True) - - group_info = yield self.store.get_group( - group_id, - ) + group_info = yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True) if group_info['join_policy'] != "open": raise SynapseError(403, "Group is not publicly joinable") From 112c2253e2846b48b75f4171d9dd94bfe6ecc8a1 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Thu, 5 Apr 2018 17:32:20 +0100 Subject: [PATCH 75/96] pep8 --- synapse/federation/transport/client.py | 2 +- synapse/groups/groups_server.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 67ff7da51b..50a967a7ec 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -872,7 +872,7 @@ class 
TransportLayerClient(object): @log_function def set_group_join_policy(self, destination, group_id, requester_user_id, - content): + content): """Sets the join policy for a group """ path = PREFIX + "/groups/%s/settings/m.join_policy" % (group_id,) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 1b516011d8..15d31549de 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -741,7 +741,9 @@ class GroupsServerHandler(object): This will error if the group requires an invite/knock to join """ - group_info = yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True) + group_info = yield self.check_group_is_ours( + group_id, requester_user_id, and_exists=True + ) if group_info['join_policy'] != "open": raise SynapseError(403, "Group is not publicly joinable") From b4478e586fe52392a439c94f525c3d885454f0a8 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Fri, 6 Apr 2018 11:37:49 +0100 Subject: [PATCH 76/96] add_user -> _add_user --- synapse/groups/groups_server.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 15d31549de..72dfed7dbb 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -678,7 +678,7 @@ class GroupsServerHandler(object): raise SynapseError(502, "Unknown state returned by HS") @defer.inlineCallbacks - def add_user(self, group_id, user_id, content): + def _add_user(self, group_id, user_id, content): """Add a user to a group based on a content dict. See accept_invite, join_group. @@ -727,7 +727,7 @@ class GroupsServerHandler(object): if not is_invited: raise SynapseError(403, "User not invited to group") - local_attestation = yield self.add_user(group_id, requester_user_id, content) + local_attestation = yield self._add_user(group_id, requester_user_id, content) defer.returnValue({ "state": "join", @@ -747,7 +747,7 @@ class GroupsServerHandler(object): if group_info['join_policy'] != "open": raise SynapseError(403, "Group is not publicly joinable") - local_attestation = yield self.add_user(group_id, requester_user_id, content) + local_attestation = yield self._add_user(group_id, requester_user_id, content) defer.returnValue({ "state": "join", From 6bd1b7053e41350fcc1b451e40da82257e2120b5 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Fri, 6 Apr 2018 11:44:18 +0100 Subject: [PATCH 77/96] By default, join policy is "invite" --- synapse/groups/groups_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 72dfed7dbb..8310dea031 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -905,7 +905,7 @@ def _parse_join_policy_dict(join_policy_dict): """ join_policy_type = join_policy_dict.get("type") if not join_policy_type: - return True + return "invite" if join_policy_type not in ("invite", "open"): raise SynapseError( From 7945435587704c5abd075140bdd46c11db93c255 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Fri, 6 Apr 2018 14:06:32 +0100 Subject: [PATCH 78/96] When exposing group state, return is_openly_joinable as opposed to join_policy, which is really only pertinent to the synapse implementation of the group server. By doing this we keep the group server concept extensible by allowing arbitrarily complex rules for deciding whether a group is openly joinable. 
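Concretely, a group's public description under this scheme might serialise as follows (values are illustrative; the keys match the columns selected in the diff below)::

    group_description = {
        "name": "Example Community",
        "short_description": "An illustrative group",
        "long_description": "Longer illustrative text",
        "avatar_url": "mxc://example.org/abcd1234",  # placeholder mxc URI
        "is_public": True,
        # the raw join_policy stays server-side; clients only see the boolean
        "is_openly_joinable": False,  # i.e. join_policy was "invite"
    }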
--- synapse/groups/groups_server.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 8310dea031..290eec7127 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -407,6 +407,11 @@ class GroupsServerHandler(object): group_description = yield self.store.get_group(group_id) if group_description: + join_policy = group_description['join_policy'] + del group_description['join_policy'] + + group_description['is_openly_joinable'] = join_policy == "open" + defer.returnValue(group_description) else: raise SynapseError(404, "Unknown group") From db2fd801f722ae8341b36314fb8929a80fd53996 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Fri, 6 Apr 2018 15:51:15 +0100 Subject: [PATCH 79/96] Explicitly grab individual columns from group object --- synapse/groups/groups_server.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 290eec7127..ad937c1721 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -404,13 +404,15 @@ class GroupsServerHandler(object): yield self.check_group_is_ours(group_id, requester_user_id) - group_description = yield self.store.get_group(group_id) + group = yield self.store.get_group(group_id) - if group_description: - join_policy = group_description['join_policy'] - del group_description['join_policy'] - - group_description['is_openly_joinable'] = join_policy == "open" + if group: + cols = [ + "name", "short_description", "long_description", + "avatar_url", "is_public", + ] + group_description = { key: group[key] for key in cols } + group_description["is_openly_joinable"] = group['join_policy'] == "open" defer.returnValue(group_description) else: From 020a5013544b1f7046faa97b83eb2acc613334b1 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Fri, 6 Apr 2018 16:02:06 +0100 Subject: [PATCH 80/96] de-lint, quote consistency --- synapse/groups/groups_server.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index ad937c1721..2d95b04e0c 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -411,8 +411,8 @@ class GroupsServerHandler(object): "name", "short_description", "long_description", "avatar_url", "is_public", ] - group_description = { key: group[key] for key in cols } - group_description["is_openly_joinable"] = group['join_policy'] == "open" + group_description = {key: group[key] for key in cols} + group_description["is_openly_joinable"] = group["join_policy"] == "open" defer.returnValue(group_description) else: From 13decdbf96981782616a3ee1826fce1213a1bc89 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 9 Apr 2018 12:58:37 +0100 Subject: [PATCH 81/96] Revert "Merge pull request #3066 from matrix-org/rav/remove_redundant_metrics" We aren't ready to release this yet, so I'm reverting it for now. This reverts commit d1679a4ed7947b0814e0f2af9b888a16c588f1a1, reversing changes made to e089100c6231541c446e37e157dec8feed02d283. 
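The wording above is the stock template git writes when reverting a merge commit; a merge revert has to name the mainline parent, e.g.::

    git revert -m 1 d1679a4e

Here ``-m 1`` keeps the first parent (the branch the pull request was merged into) as the baseline, which is why both the merge commit and the changes it brought in are referenced.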
--- CHANGES.rst | 9 --------- UPGRADE.rst | 9 +-------- docs/metrics-howto.rst | 11 ----------- synapse/http/server.py | 26 ++++++++++++++++++++++++++ synapse/util/metrics.py | 25 +++++++++++++++++++++++++ 5 files changed, 52 insertions(+), 28 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 5fbad54427..38372381ac 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,12 +1,3 @@ -Changes in synapse v0.28.0 (2018-xx-xx) -======================================= - -As previously advised, this release removes a number of redundant Prometheus -metrics. Administrators may need to update their dashboards and alerting rules -to use the updated metric names, if they have not already done so. See -`docs/metrics-howto.rst `_ -for more details. - Changes in synapse v0.27.2 (2018-03-26) ======================================= diff --git a/UPGRADE.rst b/UPGRADE.rst index 39a16b1c0c..f6bb1070b1 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -52,7 +52,7 @@ Upgrading to $NEXT_VERSION ==================== This release expands the anonymous usage stats sent if the opt-in -``report_stats`` configuration is set to ``true``. We now capture RSS memory +``report_stats`` configuration is set to ``true``. We now capture RSS memory and cpu use at a very coarse level. This requires administrators to install the optional ``psutil`` python module. @@ -60,13 +60,6 @@ We would appreciate it if you could assist by ensuring this module is available and ``report_stats`` is enabled. This will let us see if performance changes to synapse are having an impact to the general community. -This release also removes a number of redundant Prometheus metrics. -Administrators may need to update their dashboards and alerting rules to use -the updated metric names, if they have not already done so. See -`docs/metrics-howto.rst `_ -for more details. - - Upgrading to v0.15.0 ==================== diff --git a/docs/metrics-howto.rst b/docs/metrics-howto.rst index 5e2d7c52ec..8acc479bc3 100644 --- a/docs/metrics-howto.rst +++ b/docs/metrics-howto.rst @@ -34,17 +34,6 @@ How to monitor Synapse metrics using Prometheus Restart prometheus. -Deprecated metrics removed in 0.28.0 ------------------------------------- - -Synapse 0.28.0 removes all of the metrics deprecated by 0.27.0, which are those -listed under "Old name" below. This has been done to reduce the bandwidth used -by gathering metrics and the storage requirements for the Prometheus server, as -well as reducing CPU overhead for both Synapse and Prometheus. - -Administrators should update any alerts or monitoring dashboards to use the -"New name" listed below. 
- Block and response metrics renamed for 0.27.0 --------------------------------------------- diff --git a/synapse/http/server.py b/synapse/http/server.py index ac75206ef5..64e083ebfc 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -47,6 +47,17 @@ metrics = synapse.metrics.get_metrics_for(__name__) response_count = metrics.register_counter( "response_count", labels=["method", "servlet", "tag"], + alternative_names=( + # the following are all deprecated aliases for the same metric + metrics.name_prefix + x for x in ( + "_requests", + "_response_time:count", + "_response_ru_utime:count", + "_response_ru_stime:count", + "_response_db_txn_count:count", + "_response_db_txn_duration:count", + ) + ) ) requests_counter = metrics.register_counter( @@ -62,24 +73,39 @@ outgoing_responses_counter = metrics.register_counter( response_timer = metrics.register_counter( "response_time_seconds", labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_time:total", + ), ) response_ru_utime = metrics.register_counter( "response_ru_utime_seconds", labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_ru_utime:total", + ), ) response_ru_stime = metrics.register_counter( "response_ru_stime_seconds", labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_ru_stime:total", + ), ) response_db_txn_count = metrics.register_counter( "response_db_txn_count", labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_db_txn_count:total", + ), ) # seconds spent waiting for db txns, excluding scheduling time, when processing # this request response_db_txn_duration = metrics.register_counter( "response_db_txn_duration_seconds", labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_db_txn_duration:total", + ), ) # seconds spent waiting for a db connection, when processing this request diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index c3d8237e8f..e4b5687a4b 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -31,28 +31,53 @@ metrics = synapse.metrics.get_metrics_for(__name__) block_counter = metrics.register_counter( "block_count", labels=["block_name"], + alternative_names=( + # the following are all deprecated aliases for the same metric + metrics.name_prefix + x for x in ( + "_block_timer:count", + "_block_ru_utime:count", + "_block_ru_stime:count", + "_block_db_txn_count:count", + "_block_db_txn_duration:count", + ) + ) ) block_timer = metrics.register_counter( "block_time_seconds", labels=["block_name"], + alternative_names=( + metrics.name_prefix + "_block_timer:total", + ), ) block_ru_utime = metrics.register_counter( "block_ru_utime_seconds", labels=["block_name"], + alternative_names=( + metrics.name_prefix + "_block_ru_utime:total", + ), ) block_ru_stime = metrics.register_counter( "block_ru_stime_seconds", labels=["block_name"], + alternative_names=( + metrics.name_prefix + "_block_ru_stime:total", + ), ) block_db_txn_count = metrics.register_counter( "block_db_txn_count", labels=["block_name"], + alternative_names=( + metrics.name_prefix + "_block_db_txn_count:total", + ), ) # seconds spent waiting for db txns, excluding scheduling time, in this block block_db_txn_duration = metrics.register_counter( "block_db_txn_duration_seconds", labels=["block_name"], + alternative_names=( + metrics.name_prefix + "_block_db_txn_duration:total", + ), ) # seconds spent waiting for a 
db connection, in this block From 687f3451bde8e954af7c51dc9cea52e53de84e32 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Mon, 9 Apr 2018 13:44:05 +0100 Subject: [PATCH 82/96] 0.27.3 --- CHANGES.rst | 46 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index 1372de4246..9f974e6875 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,49 @@ +Changes in synapse v0.27.3 (2018-04-09) +======================================= + +Notable changes include API support for joinability of groups. Also new metrics +and phone home stats. Phone home stats include better visibility of system usage +so we can tweak synpase to work better for all users rather than our own experience +with matrix.org. Also, recording 'r30' stat which is the measure we use to track +overal growth of the Matrix ecosystem. It is defined as:- + "Counts the number of native 30 day retained users, defined as:- + * Users who have created their accounts more than 30 days + * Where last seen at most 30 days ago + * Where account creation and last_seen are > 30 days" + + +Features +* Add joinability for groups (PR #3045) +* Implement group join API (PR #3046) +* Add counter metrics for calculating state delta (PR #3033) +* R30 stats (PR #3041) +* Measure time it takes to calculate state group ID (PR #3043) +* Add basic performance statistics to phone home (PR #3044) +* Add response size metrics (PR #3071) +* phone home cache size configurations (PR #3063) + +Changes +* Add a blurb explaining the main synapse worker (PR #2886) Thanks to @turt2live! +* Replace old style error catching with 'as' keyword (PR #3000) Thanks to @NotAFile! +* Use .iter* to avoid copies in StateHandler (PR #3006) +* Linearize calls to _generate_user_id (PR #3029) +* Remove last usage of ujson (PR #3030) +* Use simplejson throughout (PR #3048) +* Use static JSONEncoders (PR #3049) +* Remove uses of events.content (PR #3060) +* Improve database cache performance (PR #3068) + +Bug fixes +* Add room_id to the response of `rooms/{roomId}/join` (PR #2986) Thanks to @jplatte! +* Fix replication after switch to simplejson (PR #3015) +* Fix replication after switch to simplejson (PR #3015) +* 404 correctly on missing paths via NoResource (PR #3022) +* Fix error when claiming e2e keys from offline servers (PR #3034) +* fix tests/storage/test_user_directory.py (PR #3042) +* use PUT instead of POST for federating groups/m.join_policy (PR #3070) Thanks to @krombel! +* postgres port script: fix state_groups_pkey error (PR #3072) + + Changes in synapse v0.27.2 (2018-03-26) ======================================= From 072fb594465126a523126b7607d19fb2f190e01a Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Mon, 9 Apr 2018 13:49:25 +0100 Subject: [PATCH 83/96] bump version --- synapse/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/__init__.py b/synapse/__init__.py index a9d5198aba..1fa2de5268 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. 
""" -__version__ = "0.27.2" +__version__ = "0.27.3" From c115deed12746f08ccc6ef7294cc9649c99aa1b7 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Mon, 9 Apr 2018 15:54:32 +0100 Subject: [PATCH 84/96] Update CHANGES.rst --- CHANGES.rst | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index df469a9930..23ee926f36 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -12,7 +12,8 @@ overal growth of the Matrix ecosystem. It is defined as:- * Where account creation and last_seen are > 30 days" -Features +Features: + * Add joinability for groups (PR #3045) * Implement group join API (PR #3046) * Add counter metrics for calculating state delta (PR #3033) @@ -22,7 +23,8 @@ Features * Add response size metrics (PR #3071) * phone home cache size configurations (PR #3063) -Changes +Changes: + * Add a blurb explaining the main synapse worker (PR #2886) Thanks to @turt2live! * Replace old style error catching with 'as' keyword (PR #3000) Thanks to @NotAFile! * Use .iter* to avoid copies in StateHandler (PR #3006) @@ -33,7 +35,8 @@ Changes * Remove uses of events.content (PR #3060) * Improve database cache performance (PR #3068) -Bug fixes +Bug fixes: + * Add room_id to the response of `rooms/{roomId}/join` (PR #2986) Thanks to @jplatte! * Fix replication after switch to simplejson (PR #3015) * Fix replication after switch to simplejson (PR #3015) From 21d5a2a08e053bf69082dd0c7142e9b299cee0b0 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Mon, 9 Apr 2018 15:55:41 +0100 Subject: [PATCH 85/96] Update CHANGES.rst --- CHANGES.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 23ee926f36..e8ff469a13 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -6,7 +6,8 @@ and phone home stats. Phone home stats include better visibility of system usage so we can tweak synpase to work better for all users rather than our own experience with matrix.org. Also, recording 'r30' stat which is the measure we use to track overal growth of the Matrix ecosystem. It is defined as:- - "Counts the number of native 30 day retained users, defined as:- + +Counts the number of native 30 day retained users, defined as:- * Users who have created their accounts more than 30 days * Where last seen at most 30 days ago * Where account creation and last_seen are > 30 days" From d9ae2bc826910c5e70766fb70f0d488faf350fe7 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Mon, 9 Apr 2018 16:00:31 +0100 Subject: [PATCH 86/96] bump version to release candidate --- synapse/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/__init__.py b/synapse/__init__.py index 1fa2de5268..354415e5c6 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. """ -__version__ = "0.27.3" +__version__ = "0.27.3-rc1" From b151eb14a2c680467ac0ff6198a016cba3156fd2 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Mon, 9 Apr 2018 16:01:59 +0100 Subject: [PATCH 87/96] Update CHANGES.rst --- CHANGES.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index e8ff469a13..d80756dc7c 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,4 +1,4 @@ -Changes in synapse v0.27.3 (2018-04-09) +Changes in synapse v0.27.3-rc1 (2018-04-09) ======================================= Notable changes include API support for joinability of groups. 
Also new metrics From 5232d3bfb1168a00c136c62359bad4f0fbec0ae2 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Mon, 9 Apr 2018 17:25:57 +0100 Subject: [PATCH 88/96] version bump v0.27.3-rc2 --- synapse/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/__init__.py b/synapse/__init__.py index 354415e5c6..7f6090baf8 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. """ -__version__ = "0.27.3-rc1" +__version__ = "0.27.3-rc2" From 64bc2162ef1c51cf5d5baa3cd3b303216b55df8c Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Mon, 9 Apr 2018 17:50:36 +0100 Subject: [PATCH 89/96] Fix psycopg2 interpolation --- synapse/storage/__init__.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 4800584b59..01c580b66e 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -289,11 +289,11 @@ class DataStore(RoomMemberStore, RoomStore, user_id, last_seen, CASE - WHEN user_agent LIKE '%Android%' THEN 'android' - WHEN user_agent LIKE '%iOS%' THEN 'ios' - WHEN user_agent LIKE '%Electron%' THEN 'electron' - WHEN user_agent LIKE '%Mozilla%' THEN 'web' - WHEN user_agent LIKE '%Gecko%' THEN 'web' + WHEN user_agent LIKE '%%Android%%' THEN 'android' + WHEN user_agent LIKE '%%iOS%%' THEN 'ios' + WHEN user_agent LIKE '%%Electron%%' THEN 'electron' + WHEN user_agent LIKE '%%Mozilla%%' THEN 'web' + WHEN user_agent LIKE '%%Gecko%%' THEN 'web' ELSE 'unknown' END AS platform From 9a311adfea940984f6a007a97afec39f23cb1be2 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Mon, 9 Apr 2018 17:52:08 +0100 Subject: [PATCH 90/96] v0.27.3-rc2 --- CHANGES.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index d80756dc7c..684c5c7883 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,8 @@ +Changes in synapse v0.27.3-rc2 (2018-04-09) +========================================== +v0.27.3-rc1 used a stale version of the develop branch so the changelog overstates +the functionality. v0.27.3-rc2 is up to date, rc1 should be ignored. 
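On the interpolation fix above: whenever bound parameters are supplied, psycopg2 scans the whole SQL string for ``%`` placeholders, so literal percent signs in e.g. ``LIKE`` patterns must be doubled. A short sketch (the connection string and cursor are assumed for illustration)::

    import psycopg2

    conn = psycopg2.connect("dbname=synapse")  # assumed DSN
    cur = conn.cursor()

    # literal % doubled, because the query also carries a bound parameter
    cur.execute(
        "SELECT count(*) FROM user_ips"
        " WHERE last_seen > %s AND user_agent LIKE '%%Android%%'",
        (0,),
    )

    # alternatively, bind the pattern itself and no escaping is needed
    cur.execute(
        "SELECT count(*) FROM user_ips WHERE user_agent LIKE %s",
        ("%Android%",),
    )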
+ Changes in synapse v0.27.3-rc1 (2018-04-09) ======================================= From 87770300d590eb105694e1cbc24aa798f788ca52 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Mon, 9 Apr 2018 18:38:59 +0100 Subject: [PATCH 91/96] Fix msec to sec --- synapse/storage/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 01c580b66e..e736a857ea 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -276,7 +276,7 @@ class DataStore(RoomMemberStore, RoomStore, def _count_r30_users(txn): thirty_days_in_secs = 86400 * 30 now = int(self._clock.time_msec()) - thirty_days_ago_in_secs = now - thirty_days_in_secs + thirty_days_ago_in_secs = now/1000 - thirty_days_in_secs sql = """ SELECT platform, COALESCE(count(*), 0) FROM ( From 61b439c90437527afdfa6b46e5ab88de6b074792 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Mon, 9 Apr 2018 18:43:48 +0100 Subject: [PATCH 92/96] Fix msec to sec, again --- synapse/storage/__init__.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index e736a857ea..417b9aa2a8 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -275,9 +275,9 @@ class DataStore(RoomMemberStore, RoomStore, """ def _count_r30_users(txn): thirty_days_in_secs = 86400 * 30 - now = int(self._clock.time_msec()) - thirty_days_ago_in_secs = now/1000 - thirty_days_in_secs - + now = int(self._clock.time()) + thirty_days_ago_in_secs = now - thirty_days_in_secs + print str(thirty_days_ago_in_secs) sql = """ SELECT platform, COALESCE(count(*), 0) FROM ( SELECT From 41e06118954234878311cea3e7a55a88e6eec784 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Mon, 9 Apr 2018 18:44:20 +0100 Subject: [PATCH 93/96] remove errant print --- synapse/storage/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 417b9aa2a8..eacd49d6a5 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -277,7 +277,7 @@ class DataStore(RoomMemberStore, RoomStore, thirty_days_in_secs = 86400 * 30 now = int(self._clock.time()) thirty_days_ago_in_secs = now - thirty_days_in_secs - print str(thirty_days_ago_in_secs) + sql = """ SELECT platform, COALESCE(count(*), 0) FROM ( SELECT From dab87b84a330467abc037489ac2034b94ddadb63 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 10 Apr 2018 11:16:08 +0100 Subject: [PATCH 94/96] URL quote path segments over federation --- synapse/federation/transport/client.py | 128 +++++++++++++++---------- 1 file changed, 80 insertions(+), 48 deletions(-) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 50a967a7ec..1976e5d2fb 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -21,6 +21,7 @@ from synapse.api.urls import FEDERATION_PREFIX as PREFIX from synapse.util.logutils import log_function import logging +import urllib logger = logging.getLogger(__name__) @@ -50,7 +51,7 @@ class TransportLayerClient(object): logger.debug("get_room_state dest=%s, room=%s", destination, room_id) - path = PREFIX + "/state/%s/" % room_id + path = _create_path(PREFIX, "/state/%s/", room_id) return self.client.get_json( destination, path=path, args={"event_id": event_id}, ) @@ -72,7 +73,7 @@ class TransportLayerClient(object): logger.debug("get_room_state_ids dest=%s, room=%s", destination, 
room_id) - path = PREFIX + "/state_ids/%s/" % room_id + path = _create_path(PREFIX, "/state_ids/%s/", room_id) return self.client.get_json( destination, path=path, args={"event_id": event_id}, ) @@ -94,7 +95,7 @@ class TransportLayerClient(object): logger.debug("get_pdu dest=%s, event_id=%s", destination, event_id) - path = PREFIX + "/event/%s/" % (event_id, ) + path = _create_path(PREFIX, "/event/%s/", event_id) return self.client.get_json(destination, path=path, timeout=timeout) @log_function @@ -120,7 +121,7 @@ class TransportLayerClient(object): # TODO: raise? return - path = PREFIX + "/backfill/%s/" % (room_id,) + path = _create_path(PREFIX, "/backfill/%s/", room_id) args = { "v": event_tuples, @@ -158,9 +159,11 @@ class TransportLayerClient(object): # generated by the json_data_callback. json_data = transaction.get_dict() + path = _create_path(PREFIX, "/send/%s/", transaction.transaction_id) + response = yield self.client.put_json( transaction.destination, - path=PREFIX + "/send/%s/" % transaction.transaction_id, + path=path, data=json_data, json_data_callback=json_data_callback, long_retries=True, @@ -178,7 +181,7 @@ class TransportLayerClient(object): @log_function def make_query(self, destination, query_type, args, retry_on_dns_fail, ignore_backoff=False): - path = PREFIX + "/query/%s" % query_type + path = _create_path(PREFIX, "/query/%s", query_type) content = yield self.client.get_json( destination=destination, @@ -223,7 +226,7 @@ class TransportLayerClient(object): "make_membership_event called with membership='%s', must be one of %s" % (membership, ",".join(valid_memberships)) ) - path = PREFIX + "/make_%s/%s/%s" % (membership, room_id, user_id) + path = _create_path(PREFIX, "/make_%s/%s/%s", membership, room_id, user_id) ignore_backoff = False retry_on_dns_fail = False @@ -249,7 +252,7 @@ class TransportLayerClient(object): @defer.inlineCallbacks @log_function def send_join(self, destination, room_id, event_id, content): - path = PREFIX + "/send_join/%s/%s" % (room_id, event_id) + path = _create_path(PREFIX, "/send_join/%s/%s", room_id, event_id) response = yield self.client.put_json( destination=destination, @@ -262,7 +265,7 @@ class TransportLayerClient(object): @defer.inlineCallbacks @log_function def send_leave(self, destination, room_id, event_id, content): - path = PREFIX + "/send_leave/%s/%s" % (room_id, event_id) + path = _create_path(PREFIX, "/send_leave/%s/%s", room_id, event_id) response = yield self.client.put_json( destination=destination, @@ -281,7 +284,7 @@ class TransportLayerClient(object): @defer.inlineCallbacks @log_function def send_invite(self, destination, room_id, event_id, content): - path = PREFIX + "/invite/%s/%s" % (room_id, event_id) + path = _create_path(PREFIX, "/invite/%s/%s", room_id, event_id) response = yield self.client.put_json( destination=destination, @@ -323,7 +326,7 @@ class TransportLayerClient(object): @defer.inlineCallbacks @log_function def exchange_third_party_invite(self, destination, room_id, event_dict): - path = PREFIX + "/exchange_third_party_invite/%s" % (room_id,) + path = _create_path(PREFIX, "/exchange_third_party_invite/%s", room_id,) response = yield self.client.put_json( destination=destination, @@ -336,7 +339,7 @@ class TransportLayerClient(object): @defer.inlineCallbacks @log_function def get_event_auth(self, destination, room_id, event_id): - path = PREFIX + "/event_auth/%s/%s" % (room_id, event_id) + path = _create_path(PREFIX, "/event_auth/%s/%s", room_id, event_id) content = yield self.client.get_json( 
             destination=destination,
@@ -348,7 +351,7 @@ class TransportLayerClient(object):
     @defer.inlineCallbacks
     @log_function
     def send_query_auth(self, destination, room_id, event_id, content):
-        path = PREFIX + "/query_auth/%s/%s" % (room_id, event_id)
+        path = _create_path(PREFIX, "/query_auth/%s/%s", room_id, event_id)
 
         content = yield self.client.post_json(
             destination=destination,
@@ -410,7 +413,7 @@ class TransportLayerClient(object):
         Returns:
             A dict containing the device keys.
         """
-        path = PREFIX + "/user/devices/" + user_id
+        path = _create_path(PREFIX, "/user/devices/%s", user_id)
 
         content = yield self.client.get_json(
             destination=destination,
@@ -460,7 +463,7 @@ class TransportLayerClient(object):
     @log_function
     def get_missing_events(self, destination, room_id, earliest_events,
                            latest_events, limit, min_depth, timeout):
-        path = PREFIX + "/get_missing_events/%s" % (room_id,)
+        path = _create_path(PREFIX, "/get_missing_events/%s", room_id,)
 
         content = yield self.client.post_json(
             destination=destination,
@@ -480,7 +483,7 @@ class TransportLayerClient(object):
     def get_group_profile(self, destination, group_id, requester_user_id):
         """Get a group profile
         """
-        path = PREFIX + "/groups/%s/profile" % (group_id,)
+        path = _create_path(PREFIX, "/groups/%s/profile", group_id,)
 
         return self.client.get_json(
             destination=destination,
@@ -499,7 +502,7 @@ class TransportLayerClient(object):
             requester_user_id (str)
             content (dict): The new profile of the group
         """
-        path = PREFIX + "/groups/%s/profile" % (group_id,)
+        path = _create_path(PREFIX, "/groups/%s/profile", group_id,)
 
         return self.client.post_json(
             destination=destination,
@@ -513,7 +516,7 @@ class TransportLayerClient(object):
     def get_group_summary(self, destination, group_id, requester_user_id):
         """Get a group summary
         """
-        path = PREFIX + "/groups/%s/summary" % (group_id,)
+        path = _create_path(PREFIX, "/groups/%s/summary", group_id,)
 
         return self.client.get_json(
             destination=destination,
@@ -526,7 +529,7 @@ class TransportLayerClient(object):
     def get_rooms_in_group(self, destination, group_id, requester_user_id):
         """Get all rooms in a group
         """
-        path = PREFIX + "/groups/%s/rooms" % (group_id,)
+        path = _create_path(PREFIX, "/groups/%s/rooms", group_id,)
 
         return self.client.get_json(
             destination=destination,
@@ -539,7 +542,7 @@ class TransportLayerClient(object):
                           content):
         """Add a room to a group
         """
-        path = PREFIX + "/groups/%s/room/%s" % (group_id, room_id,)
+        path = _create_path(PREFIX, "/groups/%s/room/%s", group_id, room_id,)
 
         return self.client.post_json(
             destination=destination,
@@ -553,7 +556,10 @@ class TransportLayerClient(object):
                                  config_key, content):
         """Update room in group
         """
-        path = PREFIX + "/groups/%s/room/%s/config/%s" % (group_id, room_id, config_key,)
+        path = _create_path(
+            PREFIX, "/groups/%s/room/%s/config/%s",
+            group_id, room_id, config_key,
+        )
 
         return self.client.post_json(
             destination=destination,
@@ -566,7 +572,7 @@ class TransportLayerClient(object):
     def remove_room_from_group(self, destination, group_id, requester_user_id, room_id):
         """Remove a room from a group
         """
-        path = PREFIX + "/groups/%s/room/%s" % (group_id, room_id,)
+        path = _create_path(PREFIX, "/groups/%s/room/%s", group_id, room_id,)
 
         return self.client.delete_json(
             destination=destination,
@@ -579,7 +585,7 @@ class TransportLayerClient(object):
     def get_users_in_group(self, destination, group_id, requester_user_id):
         """Get users in a group
         """
-        path = PREFIX + "/groups/%s/users" % (group_id,)
+        path = _create_path(PREFIX, "/groups/%s/users",
group_id,)
 
         return self.client.get_json(
             destination=destination,
@@ -592,7 +598,7 @@ class TransportLayerClient(object):
     def get_invited_users_in_group(self, destination, group_id, requester_user_id):
         """Get users that have been invited to a group
         """
-        path = PREFIX + "/groups/%s/invited_users" % (group_id,)
+        path = _create_path(PREFIX, "/groups/%s/invited_users", group_id,)
 
         return self.client.get_json(
             destination=destination,
@@ -605,7 +611,10 @@ class TransportLayerClient(object):
     def accept_group_invite(self, destination, group_id, user_id, content):
         """Accept a group invite
         """
-        path = PREFIX + "/groups/%s/users/%s/accept_invite" % (group_id, user_id)
+        path = _create_path(
+            PREFIX, "/groups/%s/users/%s/accept_invite",
+            group_id, user_id,
+        )
 
         return self.client.post_json(
             destination=destination,
@@ -618,7 +627,7 @@ class TransportLayerClient(object):
     def join_group(self, destination, group_id, user_id, content):
         """Attempts to join a group
         """
-        path = PREFIX + "/groups/%s/users/%s/join" % (group_id, user_id)
+        path = _create_path(PREFIX, "/groups/%s/users/%s/join", group_id, user_id)
 
         return self.client.post_json(
             destination=destination,
@@ -631,7 +640,7 @@ class TransportLayerClient(object):
     def invite_to_group(self, destination, group_id, user_id, requester_user_id, content):
         """Invite a user to a group
         """
-        path = PREFIX + "/groups/%s/users/%s/invite" % (group_id, user_id)
+        path = _create_path(PREFIX, "/groups/%s/users/%s/invite", group_id, user_id)
 
         return self.client.post_json(
             destination=destination,
@@ -647,7 +656,7 @@ class TransportLayerClient(object):
         invited.
         """
 
-        path = PREFIX + "/groups/local/%s/users/%s/invite" % (group_id, user_id)
+        path = _create_path(PREFIX, "/groups/local/%s/users/%s/invite", group_id, user_id)
 
         return self.client.post_json(
             destination=destination,
@@ -661,7 +670,7 @@ class TransportLayerClient(object):
                               user_id, content):
         """Remove a user from a group
         """
-        path = PREFIX + "/groups/%s/users/%s/remove" % (group_id, user_id)
+        path = _create_path(PREFIX, "/groups/%s/users/%s/remove", group_id, user_id)
 
         return self.client.post_json(
             destination=destination,
@@ -678,7 +687,7 @@ class TransportLayerClient(object):
         kicked from the group.
""" - path = PREFIX + "/groups/local/%s/users/%s/remove" % (group_id, user_id) + path = _create_path(PREFIX, "/groups/local/%s/users/%s/remove", group_id, user_id) return self.client.post_json( destination=destination, @@ -693,7 +702,7 @@ class TransportLayerClient(object): the attestations """ - path = PREFIX + "/groups/%s/renew_attestation/%s" % (group_id, user_id) + path = _create_path(PREFIX, "/groups/%s/renew_attestation/%s", group_id, user_id) return self.client.post_json( destination=destination, @@ -708,11 +717,12 @@ class TransportLayerClient(object): """Update a room entry in a group summary """ if category_id: - path = PREFIX + "/groups/%s/summary/categories/%s/rooms/%s" % ( + path = _create_path( + PREFIX, "/groups/%s/summary/categories/%s/rooms/%s", group_id, category_id, room_id, ) else: - path = PREFIX + "/groups/%s/summary/rooms/%s" % (group_id, room_id,) + path = _create_path(PREFIX, "/groups/%s/summary/rooms/%s", group_id, room_id,) return self.client.post_json( destination=destination, @@ -728,11 +738,12 @@ class TransportLayerClient(object): """Delete a room entry in a group summary """ if category_id: - path = PREFIX + "/groups/%s/summary/categories/%s/rooms/%s" % ( + path = _create_path( + PREFIX + "/groups/%s/summary/categories/%s/rooms/%s", group_id, category_id, room_id, ) else: - path = PREFIX + "/groups/%s/summary/rooms/%s" % (group_id, room_id,) + path = _create_path(PREFIX, "/groups/%s/summary/rooms/%s", group_id, room_id,) return self.client.delete_json( destination=destination, @@ -745,7 +756,7 @@ class TransportLayerClient(object): def get_group_categories(self, destination, group_id, requester_user_id): """Get all categories in a group """ - path = PREFIX + "/groups/%s/categories" % (group_id,) + path = _create_path(PREFIX, "/groups/%s/categories", group_id,) return self.client.get_json( destination=destination, @@ -758,7 +769,7 @@ class TransportLayerClient(object): def get_group_category(self, destination, group_id, requester_user_id, category_id): """Get category info in a group """ - path = PREFIX + "/groups/%s/categories/%s" % (group_id, category_id,) + path = _create_path(PREFIX, "/groups/%s/categories/%s", group_id, category_id,) return self.client.get_json( destination=destination, @@ -772,7 +783,7 @@ class TransportLayerClient(object): content): """Update a category in a group """ - path = PREFIX + "/groups/%s/categories/%s" % (group_id, category_id,) + path = _create_path(PREFIX, "/groups/%s/categories/%s", group_id, category_id,) return self.client.post_json( destination=destination, @@ -787,7 +798,7 @@ class TransportLayerClient(object): category_id): """Delete a category in a group """ - path = PREFIX + "/groups/%s/categories/%s" % (group_id, category_id,) + path = _create_path(PREFIX, "/groups/%s/categories/%s", group_id, category_id,) return self.client.delete_json( destination=destination, @@ -800,7 +811,7 @@ class TransportLayerClient(object): def get_group_roles(self, destination, group_id, requester_user_id): """Get all roles in a group """ - path = PREFIX + "/groups/%s/roles" % (group_id,) + path = _create_path(PREFIX, "/groups/%s/roles", group_id,) return self.client.get_json( destination=destination, @@ -813,7 +824,7 @@ class TransportLayerClient(object): def get_group_role(self, destination, group_id, requester_user_id, role_id): """Get a roles info """ - path = PREFIX + "/groups/%s/roles/%s" % (group_id, role_id,) + path = _create_path(PREFIX, "/groups/%s/roles/%s", group_id, role_id,) return self.client.get_json( 
             destination=destination,
@@ -827,7 +838,7 @@ class TransportLayerClient(object):
                           content):
         """Update a role in a group
         """
-        path = PREFIX + "/groups/%s/roles/%s" % (group_id, role_id,)
+        path = _create_path(PREFIX, "/groups/%s/roles/%s", group_id, role_id,)
 
         return self.client.post_json(
             destination=destination,
@@ -841,7 +852,7 @@ class TransportLayerClient(object):
     def delete_group_role(self, destination, group_id, requester_user_id, role_id):
         """Delete a role in a group
         """
-        path = PREFIX + "/groups/%s/roles/%s" % (group_id, role_id,)
+        path = _create_path(PREFIX, "/groups/%s/roles/%s", group_id, role_id,)
 
         return self.client.delete_json(
             destination=destination,
@@ -856,11 +867,12 @@ class TransportLayerClient(object):
         """Update a user's entry in a group
         """
         if role_id:
-            path = PREFIX + "/groups/%s/summary/roles/%s/users/%s" % (
+            path = _create_path(
+                PREFIX, "/groups/%s/summary/roles/%s/users/%s",
                 group_id, role_id, user_id,
             )
         else:
-            path = PREFIX + "/groups/%s/summary/users/%s" % (group_id, user_id,)
+            path = _create_path(PREFIX, "/groups/%s/summary/users/%s", group_id, user_id,)
 
         return self.client.post_json(
             destination=destination,
@@ -875,7 +887,7 @@ class TransportLayerClient(object):
                               content):
         """Sets the join policy for a group
         """
-        path = PREFIX + "/groups/%s/settings/m.join_policy" % (group_id,)
+        path = _create_path(PREFIX, "/groups/%s/settings/m.join_policy", group_id,)
 
         return self.client.put_json(
             destination=destination,
@@ -891,11 +903,12 @@ class TransportLayerClient(object):
         """Delete a user's entry in a group
         """
         if role_id:
-            path = PREFIX + "/groups/%s/summary/roles/%s/users/%s" % (
+            path = _create_path(
+                PREFIX, "/groups/%s/summary/roles/%s/users/%s",
                 group_id, role_id, user_id,
            )
         else:
-            path = PREFIX + "/groups/%s/summary/users/%s" % (group_id, user_id,)
+            path = _create_path(PREFIX, "/groups/%s/summary/users/%s", group_id, user_id,)
 
         return self.client.delete_json(
             destination=destination,
@@ -918,3 +931,22 @@ class TransportLayerClient(object):
             data=content,
             ignore_backoff=True,
         )
+
+
+def _create_path(prefix, path, *args):
+    """Creates a path from the prefix, path template and args. Ensures that
+    all args are url encoded.
+
+    Example:
+
+        _create_path(PREFIX, "/event/%s/", event_id)
+
+    Args:
+        prefix (str)
+        path (str): String template for the path
+        args: ([str]): Args to insert into path.
Each arg will be url encoded
+
+    Returns:
+        str
+    """
+    return prefix + path % tuple(urllib.quote(arg) for arg in args)

From 11d2609da70af797405241cdf7d9df19db5628f2 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 10 Apr 2018 11:24:40 +0100
Subject: [PATCH 95/96] Ensure slashes are escaped

---
 synapse/federation/transport/client.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index 1976e5d2fb..6db8efa6dd 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -949,4 +949,4 @@ def _create_path(prefix, path, *args):
     Returns:
         str
     """
-    return prefix + path % tuple(urllib.quote(arg) for arg in args)
+    return prefix + path % tuple(urllib.quote(arg, "") for arg in args)

From 781cd8c54f15378fe950a07a6e949fa65a499b12 Mon Sep 17 00:00:00 2001
From: Neil Johnson
Date: Wed, 11 Apr 2018 10:54:43 +0100
Subject: [PATCH 96/96] bump version/changelog

---
 CHANGES.rst         | 8 ++++++++
 synapse/__init__.py | 2 +-
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/CHANGES.rst b/CHANGES.rst
index 684c5c7883..a838c092fe 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -1,5 +1,13 @@
+Changes in synapse v0.27.3 (2018-04-11)
+=======================================
+
+Bug fixes:
+
+* URL quote path segments over federation (#3082)
+
 Changes in synapse v0.27.3-rc2 (2018-04-09)
 ===========================================
+
 v0.27.3-rc1 used a stale version of the develop branch so the changelog overstates
 the functionality. v0.27.3-rc2 is up to date; rc1 should be ignored.

diff --git a/synapse/__init__.py b/synapse/__init__.py
index 7f6090baf8..1fa2de5268 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -16,4 +16,4 @@
 """ This is a reference implementation of a Matrix home server.
 """
-__version__ = "0.27.3-rc2"
+__version__ = "0.27.3"
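The last two patches hinge on the quoting behaviour of Python 2's ``urllib.quote``: by default it treats ``/`` as safe and leaves it in place, so an identifier containing a slash could inject extra path segments into a federation URL. Passing an empty "safe" string escapes slashes as well. A minimal, self-contained sketch of the ``_create_path`` helper added above; the example event ID is made up, and the prefix value mirrors ``FEDERATION_PREFIX``::

    import urllib

    PREFIX = "/_matrix/federation/v1"

    def _create_path(prefix, path, *args):
        # quote(arg, "") passes an empty "safe" string, so "/" is escaped
        # along with everything else; the default safe="/" would leave a
        # slash inside an event or room ID intact and split the segment.
        return prefix + path % tuple(urllib.quote(arg, "") for arg in args)

    # A crafted event ID containing "/" stays a single path segment:
    print _create_path(PREFIX, "/event/%s/", "$abc/def:example.com")
    # -> /_matrix/federation/v1/event/%24abc%2Fdef%3Aexample.com/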