From fac3c03087b14abf7b9857f937fd3311e0619a6f Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 1 Feb 2017 18:27:24 +0000
Subject: [PATCH 01/57] Be more aggressive about purging old room
 event_push_actions

---
 synapse/storage/event_push_actions.py | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py
index 7de3e8c58c..522d0114cb 100644
--- a/synapse/storage/event_push_actions.py
+++ b/synapse/storage/event_push_actions.py
@@ -450,8 +450,12 @@ class EventPushActionsStore(SQLBaseStore):
     def _remove_old_push_actions_before_txn(self, txn, room_id, user_id,
                                             topological_ordering):
         """
-        Purges old, stale push actions for a user and room before a given
-        topological_ordering
+        Purges old push actions for a user and room before a given
+        topological_ordering.
+
+        We however keep a month's worth of highlighted notifications, so that
+        users can still get a list of recent highlights.
+
         Args:
             txn: The transaction
             room_id: Room ID to delete from
@@ -475,7 +479,8 @@ class EventPushActionsStore(SQLBaseStore):
         txn.execute(
             "DELETE FROM event_push_actions "
             " WHERE user_id = ? AND room_id = ? AND "
-            " topological_ordering < ? AND stream_ordering < ?",
+            " topological_ordering < ?"
+            " AND ((stream_ordering < ? AND highlight = 1) or highlight = 0)",
             (user_id, room_id, topological_ordering, self.stream_ordering_month_ago)
         )
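Taken together, the new predicate deletes read, non-highlight actions immediately, while read highlights survive until they fall behind the month-old stream ordering. A minimal sketch of the same rule in Python (illustrative only: `row` stands for one event_push_actions row, and `read_topo`/`month_ago_stream` stand in for the bound `topological_ordering` and `stream_ordering_month_ago` parameters)::

    def will_purge(row, read_topo, month_ago_stream):
        # Rows at or after the read point are never touched.
        if row["topological_ordering"] >= read_topo:
            return False
        if row["highlight"]:
            # Read highlights are kept until they are roughly a month old.
            return row["stream_ordering"] < month_ago_stream
        # Read, non-highlight notifications are purged straight away.
        return True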
""" - latest_event_ids = yield self.get_latest_event_ids_in_room( - room_id - ) new_latest_event_ids = set(latest_event_ids) # First, add all the new events to the list new_latest_event_ids.update( - event.event_id for event in events - if not event.internal_metadata.is_outlier() + event.event_id for event, ctx in event_contexts + if not event.internal_metadata.is_outlier() and not ctx.rejected ) # Now remove all events that are referenced by the to-be-added events new_latest_event_ids.difference_update( e_id - for event in events + for event, ctx in event_contexts for e_id, _ in event.prev_events - if not event.internal_metadata.is_outlier() + if not event.internal_metadata.is_outlier() and not ctx.rejected ) # And finally remove any events that are referenced by previously added From 52cd019a544b49469ce588c42a59100e6384d362 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 8 Feb 2017 16:04:29 +0000 Subject: [PATCH 03/57] Make None check explicit --- synapse/storage/end_to_end_keys.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/end_to_end_keys.py b/synapse/storage/end_to_end_keys.py index 2040e022fa..b9f1365f92 100644 --- a/synapse/storage/end_to_end_keys.py +++ b/synapse/storage/end_to_end_keys.py @@ -93,7 +93,7 @@ class EndToEndKeyStore(SQLBaseStore): query_clause = "user_id = ?" query_params.append(user_id) - if device_id: + if device_id is not None: query_clause += " AND device_id = ?" query_params.append(device_id) From fdbd90e25dc3682c4d6aa1dd85a9e0e4fedabb1a Mon Sep 17 00:00:00 2001 From: Daniel Dent Date: Wed, 8 Feb 2017 21:21:02 -0800 Subject: [PATCH 04/57] Update CAPTCHA_SETUP.rst X-Forwarded-For docs It looks like CAPTCHA_SETUP.rst contains information relevant to an old version of Synapse, but Synapse now has a different approach to configuring use of the X-Forwarded-For header. --- docs/CAPTCHA_SETUP.rst | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/docs/CAPTCHA_SETUP.rst b/docs/CAPTCHA_SETUP.rst index db621aedfc..19a204d9ce 100644 --- a/docs/CAPTCHA_SETUP.rst +++ b/docs/CAPTCHA_SETUP.rst @@ -25,6 +25,5 @@ Configuring IP used for auth The ReCaptcha API requires that the IP address of the user who solved the captcha is sent. If the client is connecting through a proxy or load balancer, it may be required to use the X-Forwarded-For (XFF) header instead of the origin -IP address. This can be configured as an option on the home server like so:: - - captcha_ip_origin_is_x_forwarded: true +IP address. This can be configured using the x_forwarded directive in the +listeners section of the homeserver.yaml configuration file. 
From 505bfd82bbf1ff51e8f08a98c72b9d214a269d33 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 9 Feb 2017 10:41:02 +0000 Subject: [PATCH 05/57] Update version and changelog --- CHANGES.rst | 7 +++++++ synapse/__init__.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index da241666d6..22aa7cb9b4 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,10 @@ +Changes in synapse v0.19.1 (2017-02-09) +======================================= + +* Fix bug where state was incorrectly reset in a room when synapse received an + event over federation that did not pass auth checks (PR #1892) + + Changes in synapse v0.19.0 (2017-02-04) ======================================= diff --git a/synapse/__init__.py b/synapse/__init__.py index d3f445be9c..da8ef90a77 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. """ -__version__ = "0.19.0" +__version__ = "0.19.1" From a02d609b1ffdc313fc1e06a68228ad2cb45919ec Mon Sep 17 00:00:00 2001 From: Kevin Liu Date: Sat, 11 Feb 2017 20:44:16 -0500 Subject: [PATCH 06/57] Fix synapse_port_db failure (fixes #1902) See https://matrix.to/#/!cURbafjkfsMDVwdRDQ:matrix.org/$148686272020hCgRD:potatofrom.space Signed-off-by: Kevin Liu --- scripts/synapse_port_db | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index 2cb2eab68b..4bd110c325 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -40,6 +40,7 @@ BOOLEAN_COLUMNS = { "presence_list": ["accepted"], "presence_stream": ["currently_active"], "public_room_list_stream": ["visibility"], + "device_list_outbound_pokes": ["sent"], } From 70a00eacf9ebdca70966fa8c343d55d6e9232973 Mon Sep 17 00:00:00 2001 From: Kevin Liu Date: Sat, 11 Feb 2017 20:49:31 -0500 Subject: [PATCH 07/57] Fix typo This is what I get for not proofreading --- scripts/synapse_port_db | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index 4bd110c325..ea367a1281 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -40,7 +40,7 @@ BOOLEAN_COLUMNS = { "presence_list": ["accepted"], "presence_stream": ["currently_active"], "public_room_list_stream": ["visibility"], - "device_list_outbound_pokes": ["sent"], + "device_lists_outbound_pokes": ["sent"], } From df4407d6656a218905bc04a41a72ed8b16abf73c Mon Sep 17 00:00:00 2001 From: Tyler Smith Date: Sat, 11 Feb 2017 23:02:57 -0800 Subject: [PATCH 08/57] Fix typo in config comments. Signed-off-by: Tyler Smith --- synapse/config/tls.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/config/tls.py b/synapse/config/tls.py index 3c58d2de17..e081840a83 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -95,7 +95,7 @@ class TlsConfig(Config): # make HTTPS requests to this server will check that the TLS # certificates returned by this server match one of the fingerprints. # - # Synapse automatically adds its the fingerprint of its own certificate + # Synapse automatically adds the fingerprint of its own certificate # to the list. So if federation traffic is handle directly by synapse # then no modification to the list is required. 
# From 6a3743b0d4c9845d3c50b2ab5fa9954647ce5962 Mon Sep 17 00:00:00 2001 From: Andrew Shadura Date: Sun, 12 Feb 2017 09:54:56 +0100 Subject: [PATCH 09/57] Use signedjson.sign instead of syutil.crypto.jsonsign Functions from syutil.crypto.jsonsign are now available in signedjson, so use that instead of depending on syutil. Signed-off-by: Andrew Shadura --- contrib/cmdclient/console.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/cmdclient/console.py b/contrib/cmdclient/console.py index 8bb03ce66a..4918fa1a9a 100755 --- a/contrib/cmdclient/console.py +++ b/contrib/cmdclient/console.py @@ -32,7 +32,7 @@ import urlparse import nacl.signing import nacl.encoding -from syutil.crypto.jsonsign import verify_signed_json, SignatureVerifyException +from signedjson.sign import verify_signed_json, SignatureVerifyException CONFIG_JSON = "cmdclient_config.json" From 3a46280ca3923d4f6e1103b9ee22bac0ddc2edf7 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Mon, 13 Feb 2017 11:16:53 +0000 Subject: [PATCH 10/57] Add db functions needed for room initial sync to slave --- synapse/app/synchrotron.py | 4 ++++ synapse/replication/slave/storage/account_data.py | 6 ++++++ 2 files changed, 10 insertions(+) diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index b3fb408cfd..3f29595256 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -87,6 +87,10 @@ class SynchrotronSlavedStore( RoomMemberStore.__dict__["who_forgot_in_room"] ) + did_forget = ( + RoomMemberStore.__dict__["did_forget"] + ) + # XXX: This is a bit broken because we don't persist the accepted list in a # way that can be replicated. This means that we don't have a way to # invalidate the cache correctly. diff --git a/synapse/replication/slave/storage/account_data.py b/synapse/replication/slave/storage/account_data.py index 735c03c7eb..77c64722c7 100644 --- a/synapse/replication/slave/storage/account_data.py +++ b/synapse/replication/slave/storage/account_data.py @@ -46,6 +46,12 @@ class SlavedAccountDataStore(BaseSlavedStore): ) get_tags_for_user = TagsStore.__dict__["get_tags_for_user"] + get_tags_for_room = ( + DataStore.get_tags_for_room.__func__ + ) + get_account_data_for_room = ( + DataStore.get_account_data_for_room.__func__ + ) get_updated_tags = DataStore.get_updated_tags.__func__ get_updated_account_data_for_user = ( From ecd7e36047d090cdb027f500b0f95a375ba61811 Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Mon, 13 Feb 2017 13:16:48 +0000 Subject: [PATCH 11/57] http txns: Do not cache error responses Previously we did. This meant that, amongst other errors, rate-limiting errors would be cached and prevent messages with that txn ID being sent. --- synapse/rest/client/transactions.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/synapse/rest/client/transactions.py b/synapse/rest/client/transactions.py index efa77b8c51..6396a08035 100644 --- a/synapse/rest/client/transactions.py +++ b/synapse/rest/client/transactions.py @@ -81,7 +81,16 @@ class HttpTransactionCache(object): Deferred which resolves to a tuple of (response_code, response_dict). """ try: - return self.transactions[txn_key][0].observe() + observable = self.transactions[txn_key][0] + if not observable.has_called() or observable.has_succeeded(): + return observable.observe() + # if the request has already been called with a non-2xx status + # (a Twisted failure), remove it from the transaction map. + # This is done to ensure that we don't cache rate-limiting errors, etc. 
+ res = observable.get_result() + if res.value.code >= 300: + del self.transactions[txn_key] + # fall through except (KeyError, IndexError): pass # execute the function instead. From feb15dc99f02e6cb0a84a53e397529c51743f114 Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Mon, 13 Feb 2017 13:33:12 +0000 Subject: [PATCH 12/57] Don't cache errors at all --- synapse/rest/client/transactions.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/synapse/rest/client/transactions.py b/synapse/rest/client/transactions.py index 6396a08035..95376a2fb6 100644 --- a/synapse/rest/client/transactions.py +++ b/synapse/rest/client/transactions.py @@ -81,16 +81,7 @@ class HttpTransactionCache(object): Deferred which resolves to a tuple of (response_code, response_dict). """ try: - observable = self.transactions[txn_key][0] - if not observable.has_called() or observable.has_succeeded(): - return observable.observe() - # if the request has already been called with a non-2xx status - # (a Twisted failure), remove it from the transaction map. - # This is done to ensure that we don't cache rate-limiting errors, etc. - res = observable.get_result() - if res.value.code >= 300: - del self.transactions[txn_key] - # fall through + return self.transactions[txn_key][0].observe() except (KeyError, IndexError): pass # execute the function instead. @@ -101,6 +92,14 @@ class HttpTransactionCache(object): # to the observers. observable = ObservableDeferred(deferred, consumeErrors=True) self.transactions[txn_key] = (observable, self.clock.time_msec()) + + # if the request fails with a Twisted failure, remove it + # from the transaction map. This is done to ensure that we don't + # cache transient errors like rate-limiting errors, etc. + def remove_from_map(err): + del self.transactions[txn_key] + return err + observable.addErrback(remove_from_map) return observable.observe() def _cleanup(self): From 808ddf0ae72ef45d887e00c07ba834d0873ceb8d Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Mon, 13 Feb 2017 13:36:15 +0000 Subject: [PATCH 13/57] Pop the txn from the map in case it has already been deleted somehow --- synapse/rest/client/transactions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/rest/client/transactions.py b/synapse/rest/client/transactions.py index 95376a2fb6..7b742ca7ae 100644 --- a/synapse/rest/client/transactions.py +++ b/synapse/rest/client/transactions.py @@ -97,7 +97,7 @@ class HttpTransactionCache(object): # from the transaction map. This is done to ensure that we don't # cache transient errors like rate-limiting errors, etc. def remove_from_map(err): - del self.transactions[txn_key] + self.transactions.pop(txn_key, None) return err observable.addErrback(remove_from_map) return observable.observe() From d0497425f81ed21b58046f5f8725fc70ffcc0544 Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Mon, 13 Feb 2017 13:49:44 +0000 Subject: [PATCH 14/57] Ordering is important on errbacks so add the cleanup func before creating an ObservableDeferred --- synapse/rest/client/transactions.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/synapse/rest/client/transactions.py b/synapse/rest/client/transactions.py index 7b742ca7ae..fceca2edeb 100644 --- a/synapse/rest/client/transactions.py +++ b/synapse/rest/client/transactions.py @@ -87,19 +87,19 @@ class HttpTransactionCache(object): deferred = fn(*args, **kwargs) - # We don't add an errback to the raw deferred, so we ask ObservableDeferred - # to swallow the error. 
This is fine as the error will still be reported - # to the observers. - observable = ObservableDeferred(deferred, consumeErrors=True) - self.transactions[txn_key] = (observable, self.clock.time_msec()) - # if the request fails with a Twisted failure, remove it # from the transaction map. This is done to ensure that we don't # cache transient errors like rate-limiting errors, etc. def remove_from_map(err): self.transactions.pop(txn_key, None) return err - observable.addErrback(remove_from_map) + deferred.addErrback(remove_from_map) + + # We don't add any other errbacks to the raw deferred, so we ask + # ObservableDeferred to swallow the error. This is fine as the error will + # still be reported to the observers. + observable = ObservableDeferred(deferred, consumeErrors=True) + self.transactions[txn_key] = (observable, self.clock.time_msec()) return observable.observe() def _cleanup(self): From 9e617cd4c2da527caf93799e286b42352ad492d2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 13 Feb 2017 13:50:03 +0000 Subject: [PATCH 15/57] Cache get_presence storage --- synapse/handlers/presence.py | 2 +- synapse/replication/slave/storage/presence.py | 4 +++- synapse/storage/presence.py | 14 +++++++++++--- 3 files changed, 15 insertions(+), 5 deletions(-) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index fdfce2a88c..da610e430f 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -531,7 +531,7 @@ class PresenceHandler(object): # There are things not in our in memory cache. Lets pull them out of # the database. res = yield self.store.get_presence_for_users(missing) - states.update({state.user_id: state for state in res}) + states.update(res) missing = [user_id for user_id, state in states.items() if not state] if missing: diff --git a/synapse/replication/slave/storage/presence.py b/synapse/replication/slave/storage/presence.py index 703f4a49bf..40f6c9a386 100644 --- a/synapse/replication/slave/storage/presence.py +++ b/synapse/replication/slave/storage/presence.py @@ -18,6 +18,7 @@ from ._slaved_id_tracker import SlavedIdTracker from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.storage import DataStore +from synapse.storage.presence import PresenceStore class SlavedPresenceStore(BaseSlavedStore): @@ -35,7 +36,8 @@ class SlavedPresenceStore(BaseSlavedStore): _get_active_presence = DataStore._get_active_presence.__func__ take_presence_startup_info = DataStore.take_presence_startup_info.__func__ - get_presence_for_users = DataStore.get_presence_for_users.__func__ + _get_presence_for_user = PresenceStore.__dict__["_get_presence_for_user"] + get_presence_for_users = PresenceStore.__dict__["get_presence_for_users"] def get_current_presence_token(self): return self._presence_id_gen.get_current_token() diff --git a/synapse/storage/presence.py b/synapse/storage/presence.py index 7460f98a1f..4d1590d2b4 100644 --- a/synapse/storage/presence.py +++ b/synapse/storage/presence.py @@ -15,7 +15,7 @@ from ._base import SQLBaseStore from synapse.api.constants import PresenceState -from synapse.util.caches.descriptors import cached, cachedInlineCallbacks +from synapse.util.caches.descriptors import cached, cachedInlineCallbacks, cachedList from collections import namedtuple from twisted.internet import defer @@ -85,6 +85,9 @@ class PresenceStore(SQLBaseStore): self.presence_stream_cache.entity_has_changed, state.user_id, stream_id, ) + self._invalidate_cache_and_stream( + txn, self._get_presence_for_user, (state.user_id,) + 
) # Actually insert new rows self._simple_insert_many_txn( @@ -143,7 +146,12 @@ class PresenceStore(SQLBaseStore): "get_all_presence_updates", get_all_presence_updates_txn ) - @defer.inlineCallbacks + @cached() + def _get_presence_for_user(self, user_id): + raise NotImplementedError() + + @cachedList(cached_method_name="_get_presence_for_user", list_name="user_ids", + num_args=1, inlineCallbacks=True) def get_presence_for_users(self, user_ids): rows = yield self._simple_select_many_batch( table="presence_stream", @@ -165,7 +173,7 @@ class PresenceStore(SQLBaseStore): for row in rows: row["currently_active"] = bool(row["currently_active"]) - defer.returnValue([UserPresenceState(**row) for row in rows]) + defer.returnValue({row["user_id"]: UserPresenceState(**row) for row in rows}) def get_current_presence_token(self): return self._presence_id_gen.get_current_token() From 095b45c1653bc93788a45b891f29efc30f0b1b07 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 3 Feb 2017 18:12:53 +0000 Subject: [PATCH 16/57] Aggregate event push actions --- synapse/replication/slave/storage/events.py | 6 + synapse/storage/event_push_actions.py | 271 ++++++++++++++---- synapse/storage/receipts.py | 1 + .../schema/delta/40/event_push_summary.sql | 37 +++ tests/storage/test_event_push_actions.py | 86 ++++++ 5 files changed, 342 insertions(+), 59 deletions(-) create mode 100644 synapse/storage/schema/delta/40/event_push_summary.sql diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py index d72ff6055c..622b2d8540 100644 --- a/synapse/replication/slave/storage/events.py +++ b/synapse/replication/slave/storage/events.py @@ -85,6 +85,12 @@ class SlavedEventStore(BaseSlavedStore): get_unread_event_push_actions_by_room_for_user = ( EventPushActionsStore.__dict__["get_unread_event_push_actions_by_room_for_user"] ) + _get_unread_counts_by_receipt_txn = ( + DataStore._get_unread_counts_by_receipt_txn.__func__ + ) + _get_unread_counts_by_pos_txn = ( + DataStore._get_unread_counts_by_pos_txn.__func__ + ) _get_state_group_for_events = ( StateStore.__dict__["_get_state_group_for_events"] ) diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index 522d0114cb..300571b78b 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -15,6 +15,7 @@ from ._base import SQLBaseStore from twisted.internet import defer +from synapse.util.async import sleep from synapse.util.caches.descriptors import cachedInlineCallbacks from synapse.types import RoomStreamToken from .stream import lower_bound @@ -29,7 +30,6 @@ class EventPushActionsStore(SQLBaseStore): EPA_HIGHLIGHT_INDEX = "epa_highlight_index" def __init__(self, hs): - self.stream_ordering_month_ago = None super(EventPushActionsStore, self).__init__(hs) self.register_background_index_update( @@ -47,6 +47,9 @@ class EventPushActionsStore(SQLBaseStore): where_clause="highlight=1" ) + self._doing_notif_rotation = False + self._clock.looping_call(self._rotate_notifs, 60 * 1000) + def _set_push_actions_for_event_and_users_txn(self, txn, event, tuples): """ Args: @@ -77,67 +80,90 @@ class EventPushActionsStore(SQLBaseStore): def get_unread_event_push_actions_by_room_for_user( self, room_id, user_id, last_read_event_id ): - def _get_unread_event_push_actions_by_room(txn): - sql = ( - "SELECT stream_ordering, topological_ordering" - " FROM events" - " WHERE room_id = ? AND event_id = ?" 
- ) - txn.execute( - sql, (room_id, last_read_event_id) - ) - results = txn.fetchall() - if len(results) == 0: - return {"notify_count": 0, "highlight_count": 0} - - stream_ordering = results[0][0] - topological_ordering = results[0][1] - token = RoomStreamToken( - topological_ordering, stream_ordering - ) - - # First get number of notifications. - # We don't need to put a notif=1 clause as all rows always have - # notif=1 - sql = ( - "SELECT count(*)" - " FROM event_push_actions ea" - " WHERE" - " user_id = ?" - " AND room_id = ?" - " AND %s" - ) % (lower_bound(token, self.database_engine, inclusive=False),) - - txn.execute(sql, (user_id, room_id)) - row = txn.fetchone() - notify_count = row[0] if row else 0 - - # Now get the number of highlights - sql = ( - "SELECT count(*)" - " FROM event_push_actions ea" - " WHERE" - " highlight = 1" - " AND user_id = ?" - " AND room_id = ?" - " AND %s" - ) % (lower_bound(token, self.database_engine, inclusive=False),) - - txn.execute(sql, (user_id, room_id)) - row = txn.fetchone() - highlight_count = row[0] if row else 0 - - return { - "notify_count": notify_count, - "highlight_count": highlight_count, - } - ret = yield self.runInteraction( "get_unread_event_push_actions_by_room", - _get_unread_event_push_actions_by_room + self._get_unread_counts_by_receipt_txn, + room_id, user_id, last_read_event_id ) defer.returnValue(ret) + def _get_unread_counts_by_receipt_txn(self, txn, room_id, user_id, + last_read_event_id): + sql = ( + "SELECT stream_ordering, topological_ordering" + " FROM events" + " WHERE room_id = ? AND event_id = ?" + ) + txn.execute( + sql, (room_id, last_read_event_id) + ) + results = txn.fetchall() + if len(results) == 0: + return {"notify_count": 0, "highlight_count": 0} + + stream_ordering = results[0][0] + topological_ordering = results[0][1] + + return self._get_unread_counts_by_pos_txn( + txn, room_id, user_id, topological_ordering, stream_ordering + ) + + def _get_unread_counts_by_pos_txn(self, txn, room_id, user_id, topological_ordering, + stream_ordering): + token = RoomStreamToken( + topological_ordering, stream_ordering + ) + + # First get number of notifications. + # We don't need to put a notif=1 clause as all rows always have + # notif=1 + sql = ( + "SELECT count(*)" + " FROM event_push_actions ea" + " WHERE" + " user_id = ?" + " AND room_id = ?" + " AND %s" + ) % (lower_bound(token, self.database_engine, inclusive=False),) + + txn.execute(sql, (user_id, room_id)) + row = txn.fetchone() + notify_count = row[0] if row else 0 + + summary_notif_count = self._simple_select_one_onecol_txn( + txn, + table="event_push_summary", + keyvalues={ + "user_id": user_id, + "room_id": room_id, + }, + retcol="notif_count", + allow_none=True, + ) + + if summary_notif_count: + notify_count += summary_notif_count + + # Now get the number of highlights + sql = ( + "SELECT count(*)" + " FROM event_push_actions ea" + " WHERE" + " highlight = 1" + " AND user_id = ?" + " AND room_id = ?" 
+ " AND %s" + ) % (lower_bound(token, self.database_engine, inclusive=False),) + + txn.execute(sql, (user_id, room_id)) + row = txn.fetchone() + highlight_count = row[0] if row else 0 + + return { + "notify_count": notify_count, + "highlight_count": highlight_count, + } + @defer.inlineCallbacks def get_push_action_users_in_range(self, min_stream_ordering, max_stream_ordering): def f(txn): @@ -448,7 +474,7 @@ class EventPushActionsStore(SQLBaseStore): ) def _remove_old_push_actions_before_txn(self, txn, room_id, user_id, - topological_ordering): + topological_ordering, stream_ordering): """ Purges old push actions for a user and room before a given topological_ordering. @@ -479,11 +505,16 @@ class EventPushActionsStore(SQLBaseStore): txn.execute( "DELETE FROM event_push_actions " " WHERE user_id = ? AND room_id = ? AND " - " topological_ordering < ?" + " topological_ordering <= ?" " AND ((stream_ordering < ? AND highlight = 1) or highlight = 0)", (user_id, room_id, topological_ordering, self.stream_ordering_month_ago) ) + txn.execute(""" + DELETE FROM event_push_summary + WHERE room_id = ? AND user_id = ? AND stream_ordering <= ? + """, (room_id, user_id, stream_ordering)) + @defer.inlineCallbacks def _find_stream_orderings_for_times(self): yield self.runInteraction( @@ -500,6 +531,14 @@ class EventPushActionsStore(SQLBaseStore): "Found stream ordering 1 month ago: it's %d", self.stream_ordering_month_ago ) + logger.info("Searching for stream ordering 1 day ago") + self.stream_ordering_day_ago = self._find_first_stream_ordering_after_ts_txn( + txn, self._clock.time_msec() - 24 * 60 * 60 * 1000 + ) + logger.info( + "Found stream ordering 1 day ago: it's %d", + self.stream_ordering_day_ago + ) def _find_first_stream_ordering_after_ts_txn(self, txn, ts): """ @@ -539,6 +578,120 @@ class EventPushActionsStore(SQLBaseStore): return range_end + @defer.inlineCallbacks + def _rotate_notifs(self): + if self._doing_notif_rotation or self.stream_ordering_day_ago is None: + return + self._doing_notif_rotation = True + + try: + while True: + logger.info("Rotating notifications") + + caught_up = yield self.runInteraction( + "_rotate_notifs", + self._rotate_notifs_txn + ) + if caught_up: + break + yield sleep(1) + finally: + self._doing_notif_rotation = False + + def _rotate_notifs_txn(self, txn): + """Archives older notifications into event_push_summary. Returns whether + the archiving process has caught up or not. + """ + + # We want to make sure that we only ever do this one at a time + self.database_engine.lock_table(txn, "event_push_summary") + + # We don't to try and rotate millions of rows at once, so we cap the + # maximum stream ordering we'll rotate before. 
+ txn.execute(""" + SELECT stream_ordering FROM event_push_actions + ORDER BY stream_ordering ASC LIMIT 1 OFFSET 50000 + """) + stream_row = txn.fetchone() + if stream_row: + offset_stream_ordering, = stream_row + rotate_to_stream_ordering = min( + self.stream_ordering_day_ago, offset_stream_ordering + ) + caught_up = offset_stream_ordering >= self.stream_ordering_day_ago + else: + rotate_to_stream_ordering = self.stream_ordering_day_ago + caught_up = True + + self._rotate_notifs_before_txn(txn, rotate_to_stream_ordering) + + # We have caught up iff we were limited by `stream_ordering_day_ago` + return caught_up + + def _rotate_notifs_before_txn(self, txn, rotate_to_stream_ordering): + old_rotate_stream_ordering = self._simple_select_one_onecol_txn( + txn, + table="event_push_summary_stream_ordering", + keyvalues={}, + retcol="stream_ordering", + ) + + # Calculate the new counts that should be upserted into event_push_summary + sql = """ + SELECT user_id, room_id, + coalesce(old.notif_count, 0) + upd.notif_count, + upd.stream_ordering, + old.user_id + FROM ( + SELECT user_id, room_id, count(*) as notif_count, + max(stream_ordering) as stream_ordering + FROM event_push_actions + WHERE ? <= stream_ordering AND stream_ordering < ? + AND highlight = 0 + GROUP BY user_id, room_id + ) AS upd + LEFT JOIN event_push_summary AS old USING (user_id, room_id) + """ + + txn.execute(sql, (old_rotate_stream_ordering, rotate_to_stream_ordering,)) + rows = txn.fetchall() + + # If the `old.user_id` above is NULL then we know there isn't already an + # entry in the table, so we simply insert it. Otherwise we update the + # existing table. + self._simple_insert_many_txn( + txn, + table="event_push_summary", + values=[ + { + "user_id": row[0], + "room_id": row[1], + "notif_count": row[2], + "stream_ordering": row[3], + } + for row in rows if row[4] is None + ] + ) + + txn.executemany( + """ + UPDATE event_push_summary SET notif_count = ?, stream_ordering = ? + WHERE user_id = ? AND room_id = ? + """, + ((row[2], row[3], row[0], row[1],) for row in rows if row[4] is not None) + ) + + txn.execute( + "DELETE FROM event_push_actions" + " WHERE ? <= stream_ordering AND stream_ordering < ? AND highlight = 0", + (old_rotate_stream_ordering, rotate_to_stream_ordering,) + ) + + txn.execute( + "UPDATE event_push_summary_stream_ordering SET stream_ordering = ?", + (rotate_to_stream_ordering,) + ) + def _action_has_highlight(actions): for action in actions: diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index f72d15f5ed..5cf41501ea 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -351,6 +351,7 @@ class ReceiptsStore(SQLBaseStore): room_id=room_id, user_id=user_id, topological_ordering=topological_ordering, + stream_ordering=stream_ordering, ) return True diff --git a/synapse/storage/schema/delta/40/event_push_summary.sql b/synapse/storage/schema/delta/40/event_push_summary.sql new file mode 100644 index 0000000000..3918f0b794 --- /dev/null +++ b/synapse/storage/schema/delta/40/event_push_summary.sql @@ -0,0 +1,37 @@ +/* Copyright 2017 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Aggregate of old notification counts that have been deleted out of the +-- main event_push_actions table. This count does not include those that were +-- highlights, as they remain in the event_push_actions table. +CREATE TABLE event_push_summary ( + user_id TEXT NOT NULL, + room_id TEXT NOT NULL, + notif_count BIGINT NOT NULL, + stream_ordering BIGINT NOT NULL +); + +CREATE INDEX event_push_summary_user_rm ON event_push_summary(user_id, room_id); + + +-- The stream ordering up to which we have aggregated the event_push_actions +-- table into event_push_summary +CREATE TABLE event_push_summary_stream_ordering ( + Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, -- Makes sure this table only has one row. + stream_ordering BIGINT NOT NULL, + CHECK (Lock='X') +); + +INSERT INTO event_push_summary_stream_ordering (stream_ordering) VALUES (0); diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py index e9044afa2e..3135488353 100644 --- a/tests/storage/test_event_push_actions.py +++ b/tests/storage/test_event_push_actions.py @@ -17,9 +17,15 @@ from twisted.internet import defer import tests.unittest import tests.utils +from mock import Mock USER_ID = "@user:example.com" +PlAIN_NOTIF = ["notify", {"set_tweak": "highlight", "value": False}] +HIGHLIGHT = [ + "notify", {"set_tweak": "sound", "value": "default"}, {"set_tweak": "highlight"} +] + class EventPushActionsStoreTestCase(tests.unittest.TestCase): @@ -39,3 +45,83 @@ class EventPushActionsStoreTestCase(tests.unittest.TestCase): yield self.store.get_unread_push_actions_for_user_in_range_for_email( USER_ID, 0, 1000, 20 ) + + @defer.inlineCallbacks + def test_count_aggregation(self): + room_id = "!foo:example.com" + user_id = "@user1235:example.com" + + @defer.inlineCallbacks + def _assert_counts(noitf_count, highlight_count): + counts = yield self.store.runInteraction( + "", self.store._get_unread_counts_by_pos_txn, + room_id, user_id, 0, 0 + ) + self.assertEquals( + counts, + {"notify_count": noitf_count, "highlight_count": highlight_count} + ) + + def _inject_actions(stream, action): + event = Mock() + event.room_id = room_id + event.event_id = "$test:example.com" + event.internal_metadata.stream_ordering = stream + event.depth = stream + + tuples = [(user_id, action)] + + return self.store.runInteraction( + "", self.store._set_push_actions_for_event_and_users_txn, + event, tuples + ) + + def _rotate(stream): + return self.store.runInteraction( + "", self.store._rotate_notifs_before_txn, stream + ) + + def _mark_read(stream, depth): + return self.store.runInteraction( + "", self.store._remove_old_push_actions_before_txn, + room_id, user_id, depth, stream + ) + + yield _assert_counts(0, 0) + yield _inject_actions(1, PlAIN_NOTIF) + yield _assert_counts(1, 0) + yield _rotate(2) + yield _assert_counts(1, 0) + + yield _inject_actions(3, PlAIN_NOTIF) + yield _assert_counts(2, 0) + yield _rotate(4) + yield _assert_counts(2, 0) + + yield _inject_actions(5, PlAIN_NOTIF) + yield _mark_read(3, 3) + yield _assert_counts(1, 0) + + yield _mark_read(5, 5) + yield _assert_counts(0, 0) + + yield 
_inject_actions(6, PlAIN_NOTIF) + yield _rotate(7) + + yield self.store._simple_delete( + table="event_push_actions", + keyvalues={"1": 1}, + desc="", + ) + + yield _assert_counts(1, 0) + + yield _mark_read(7, 7) + yield _assert_counts(0, 0) + + yield _inject_actions(8, HIGHLIGHT) + yield _assert_counts(1, 1) + yield _rotate(9) + yield _assert_counts(1, 1) + yield _rotate(10) + yield _assert_counts(1, 1) From ce3c8df6dff456cdc548f1bc40a47f4b2a09a0a6 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 14 Feb 2017 13:41:24 +0000 Subject: [PATCH 17/57] Less aggressive timers --- synapse/storage/event_push_actions.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index 300571b78b..1b7888a914 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -48,7 +48,7 @@ class EventPushActionsStore(SQLBaseStore): ) self._doing_notif_rotation = False - self._clock.looping_call(self._rotate_notifs, 60 * 1000) + self._clock.looping_call(self._rotate_notifs, 30 * 60 * 1000) def _set_push_actions_for_event_and_users_txn(self, txn, event, tuples): """ @@ -594,7 +594,7 @@ class EventPushActionsStore(SQLBaseStore): ) if caught_up: break - yield sleep(1) + yield sleep(5) finally: self._doing_notif_rotation = False From fc2f29c1d071ff9b140f10ca46f463f6638d8357 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 14 Feb 2017 13:59:50 +0000 Subject: [PATCH 18/57] Fix bugs in the /keys/changes api * `get_forward_extremeties_for_room` takes a numeric `stream_ordering`. We were passing a `RoomStreamToken`, which meant that it returned the *current* extremities, rather than those corresponding to the `from_token`. However: * `get_state_ids_for_events` required a second ('types') parameter; this meant that a `TypeError` was thrown and we ended up acting as though there was *no* prev state. * `get_state_ids_for_events` actually returns a map from event_id to state dictionary - just looking up the state keys in it again meant that we acted as though there was no prev state. We now check if each member's state has changed since *any* of the extremities. Also add/fix some comments. --- synapse/handlers/device.py | 38 ++++++++++++++++++++++------- synapse/storage/event_federation.py | 17 ++++++++++++- synapse/storage/state.py | 14 ++++++++++- 3 files changed, 58 insertions(+), 11 deletions(-) diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 8cb47ac417..ca7137f315 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -12,7 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from synapse.api import errors from synapse.api.constants import EventTypes from synapse.util import stringutils @@ -246,30 +245,51 @@ class DeviceHandler(BaseHandler): # Then work out if any users have since joined rooms_changed = self.store.get_rooms_that_changed(room_ids, from_token.room_key) + stream_ordering = RoomStreamToken.parse_stream_token( + from_token.room_key).stream + possibly_changed = set(changed) for room_id in rooms_changed: - # Fetch the current state at the time. - stream_ordering = RoomStreamToken.parse_stream_token(from_token.room_key) - + # Fetch the current state at the time. 
try: event_ids = yield self.store.get_forward_extremeties_for_room( room_id, stream_ordering=stream_ordering ) - prev_state_ids = yield self.store.get_state_ids_for_events(event_ids) - except: - prev_state_ids = {} + except errors.StoreError: + # we have purged the stream_ordering index since the stream + # ordering: treat it the same as a new room + event_ids = [] current_state_ids = yield self.state.get_current_state_ids(room_id) + # special-case for an empty prev state: include all members + # in the changed list + if not event_ids: + for key, event_id in current_state_ids.iteritems(): + etype, state_key = key + if etype != EventTypes.Member: + continue + possibly_changed.add(state_key) + continue + + # mapping from event_id -> state_dict + prev_state_ids = yield self.store.get_state_ids_for_events(event_ids) + # If there has been any change in membership, include them in the # possibly changed list. We'll check if they are joined below, # and we're not toooo worried about spuriously adding users. for key, event_id in current_state_ids.iteritems(): etype, state_key = key - if etype == EventTypes.Member: - prev_event_id = prev_state_ids.get(key, None) + if etype != EventTypes.Member: + continue + + # check if this member has changed since any of the extremities + # at the stream_ordering, and add them to the list if so. + for state_dict in prev_state_ids.values(): + prev_event_id = state_dict.get(key, None) if not prev_event_id or prev_event_id != event_id: possibly_changed.add(state_key) + break users_who_share_room = yield self.store.get_users_who_share_room_with_user( user_id diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py index ee88c61954..256e50dc20 100644 --- a/synapse/storage/event_federation.py +++ b/synapse/storage/event_federation.py @@ -281,15 +281,30 @@ class EventFederationStore(SQLBaseStore): ) def get_forward_extremeties_for_room(self, room_id, stream_ordering): + """For a given room_id and stream_ordering, return the forward + extremeties of the room at that point in "time". + + Throws a StoreError if we have since purged the index for + stream_orderings from that point. + + Args: + room_id (str): + stream_ordering (int): + + Returns: + deferred, which resolves to a list of event_ids + """ # We want to make the cache more effective, so we clamp to the last # change before the given ordering. last_change = self._events_stream_cache.get_max_pos_of_last_change(room_id) # We don't always have a full stream_to_exterm_id table, e.g. after # the upgrade that introduced it, so we make sure we never ask for a - # try and pin to a stream_ordering from before a restart + # stream_ordering from before a restart last_change = max(self._stream_order_on_start, last_change) + # provided the last_change is recent enough, we now clamp the requested + # stream_ordering to it. 
if last_change > self.stream_ordering_month_ago: stream_ordering = min(last_change, stream_ordering) diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 1b3800eb6a..84482d8285 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -413,7 +413,19 @@ class StateStore(SQLBaseStore): defer.returnValue({event: event_to_state[event] for event in event_ids}) @defer.inlineCallbacks - def get_state_ids_for_events(self, event_ids, types): + def get_state_ids_for_events(self, event_ids, types=None): + """ + Get the state dicts corresponding to a list of events + + Args: + event_ids(list(str)): events whose state should be returned + types(list[(str, str)]|None): List of (type, state_key) tuples + which are used to filter the state fetched. May be None, which + matches any key + + Returns: + A deferred dict from event_id -> (type, state_key) -> state_event + """ event_to_groups = yield self._get_state_group_for_events( event_ids, ) From 355d62c499b63f337c484cca6704161a3a05da1d Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 14 Feb 2017 15:10:55 +0000 Subject: [PATCH 19/57] Make kick & ban reasons work We somehow specced APIs with reason strings, preserve the content in the events and even have the clients display them, but failed to actually pass the parameter through to the event content. --- synapse/rest/client/v1/room.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 2ebf5e59a0..728e3df0e3 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -609,6 +609,10 @@ class RoomMembershipRestServlet(ClientV1RestServlet): raise SynapseError(400, "Missing user_id key.") target = UserID.from_string(content["user_id"]) + event_content = None + if 'reason' in content and membership_action in ['kick', 'ban']: + event_content = {'reason': content['reason']} + yield self.handlers.room_member_handler.update_membership( requester=requester, target=target, @@ -616,6 +620,7 @@ class RoomMembershipRestServlet(ClientV1RestServlet): action=membership_action, txn_id=txn_id, third_party_signed=content.get("third_party_signed", None), + content=event_content, ) defer.returnValue((200, {})) From 474c9aadbe542440bb7cd764a654518b5c36f851 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 15 Feb 2017 19:32:20 +0000 Subject: [PATCH 20/57] Allow forgetting rooms you're banned from --- synapse/handlers/room_member.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index b2806555cf..2052d6d05f 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -719,7 +719,9 @@ class RoomMemberHandler(BaseHandler): ) membership = member.membership if member else None - if membership is not None and membership != Membership.LEAVE: + if membership is not None and membership not in [ + Membership.LEAVE, Membership.BAN + ]: raise SynapseError(400, "User %s in room %s" % ( user_id, room_id )) From e6acf0c399b1baadc56f57d8398643a4cb1de56a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 14 Feb 2017 16:37:25 +0000 Subject: [PATCH 21/57] Store the default push actions in a more efficient manner --- synapse/storage/event_push_actions.py | 51 +++++++++++++++++++++------ 1 file changed, 40 insertions(+), 11 deletions(-) diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index 1b7888a914..db20b7de20 100644 --- 
a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -26,6 +26,32 @@ import ujson as json logger = logging.getLogger(__name__) +DEFAULT_NOTIF_ACITON = ["notify", {"set_tweak": "highlight", "value": False}] +DEFAULT_HIGHLIGHT_ACITON = [ + "notify", {"set_tweak": "sound", "value": "default"}, {"set_tweak": "highlight"} +] + + +def _serialize_action(actions, is_highlight): + if is_highlight: + if actions == DEFAULT_HIGHLIGHT_ACITON: + return "" + else: + if actions == DEFAULT_NOTIF_ACITON: + return "" + return json.dumps(actions) + + +def _deserialize_action(actions, is_highlight): + if actions: + return json.loads(actions) + + if is_highlight: + return DEFAULT_HIGHLIGHT_ACITON + else: + return DEFAULT_NOTIF_ACITON + + class EventPushActionsStore(SQLBaseStore): EPA_HIGHLIGHT_INDEX = "epa_highlight_index" @@ -58,15 +84,17 @@ class EventPushActionsStore(SQLBaseStore): """ values = [] for uid, actions in tuples: + is_highlight = 1 if _action_has_highlight(actions) else 0 + values.append({ 'room_id': event.room_id, 'event_id': event.event_id, 'user_id': uid, - 'actions': json.dumps(actions), + 'actions': _serialize_action(actions, is_highlight), 'stream_ordering': event.internal_metadata.stream_ordering, 'topological_ordering': event.depth, 'notif': 1, - 'highlight': 1 if _action_has_highlight(actions) else 0, + 'highlight': is_highlight, }) for uid, __ in tuples: @@ -202,7 +230,8 @@ class EventPushActionsStore(SQLBaseStore): # find rooms that have a read receipt in them and return the next # push actions sql = ( - "SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions" + "SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions," + " ep.highlight " " FROM (" " SELECT room_id," " MAX(topological_ordering) as topological_ordering," @@ -243,7 +272,7 @@ class EventPushActionsStore(SQLBaseStore): def get_no_receipt(txn): sql = ( "SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions," - " e.received_ts" + " ep.highlight " " FROM event_push_actions AS ep" " INNER JOIN events AS e USING (room_id, event_id)" " WHERE" @@ -272,7 +301,7 @@ class EventPushActionsStore(SQLBaseStore): "event_id": row[0], "room_id": row[1], "stream_ordering": row[2], - "actions": json.loads(row[3]), + "actions": _deserialize_action(row[3], row[4]), } for row in after_read_receipt + no_read_receipt ] @@ -311,7 +340,7 @@ class EventPushActionsStore(SQLBaseStore): def get_after_receipt(txn): sql = ( "SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions," - " e.received_ts" + " ep.highlight, e.received_ts" " FROM (" " SELECT room_id," " MAX(topological_ordering) as topological_ordering," @@ -353,7 +382,7 @@ class EventPushActionsStore(SQLBaseStore): def get_no_receipt(txn): sql = ( "SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions," - " e.received_ts" + " ep.highlight, e.received_ts" " FROM event_push_actions AS ep" " INNER JOIN events AS e USING (room_id, event_id)" " WHERE" @@ -383,8 +412,8 @@ class EventPushActionsStore(SQLBaseStore): "event_id": row[0], "room_id": row[1], "stream_ordering": row[2], - "actions": json.loads(row[3]), - "received_ts": row[4], + "actions": _deserialize_action(row[3], row[4]), + "received_ts": row[5], } for row in after_read_receipt + no_read_receipt ] @@ -418,7 +447,7 @@ class EventPushActionsStore(SQLBaseStore): sql = ( "SELECT epa.event_id, epa.room_id," " epa.stream_ordering, epa.topological_ordering," - " epa.actions, epa.profile_tag, e.received_ts" + " epa.actions, epa.highlight, epa.profile_tag, e.received_ts" " 
FROM event_push_actions epa, events e" " WHERE epa.event_id = e.event_id" " AND epa.user_id = ? %s" @@ -433,7 +462,7 @@ class EventPushActionsStore(SQLBaseStore): "get_push_actions_for_user", f ) for pa in push_actions: - pa["actions"] = json.loads(pa["actions"]) + pa["actions"] = _deserialize_action(pa["actions"], pa["highlight"]) defer.returnValue(push_actions) @defer.inlineCallbacks From 502ae6c663e9b96950d1b474fbec7cf200e4e8a1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 14 Feb 2017 16:54:37 +0000 Subject: [PATCH 22/57] Comment --- synapse/storage/event_push_actions.py | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index db20b7de20..ea6722df63 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -26,30 +26,37 @@ import ujson as json logger = logging.getLogger(__name__) -DEFAULT_NOTIF_ACITON = ["notify", {"set_tweak": "highlight", "value": False}] -DEFAULT_HIGHLIGHT_ACITON = [ +DEFAULT_NOTIF_ACTION = ["notify", {"set_tweak": "highlight", "value": False}] +DEFAULT_HIGHLIGHT_ACTION = [ "notify", {"set_tweak": "sound", "value": "default"}, {"set_tweak": "highlight"} ] def _serialize_action(actions, is_highlight): + """Custom serializer for actions. This allows us to "compress" common actions. + + We use the fact that most users have the same actions for notifs (and for + highlights). We replaces these default actions with the emtpy string. + """ if is_highlight: - if actions == DEFAULT_HIGHLIGHT_ACITON: - return "" + if actions == DEFAULT_HIGHLIGHT_ACTION: + return "" # We use empty string as the column is non-NULL else: - if actions == DEFAULT_NOTIF_ACITON: + if actions == DEFAULT_NOTIF_ACTION: return "" return json.dumps(actions) def _deserialize_action(actions, is_highlight): + """Custom deserializer for actions. This allows us to "compress" common actions + """ if actions: return json.loads(actions) if is_highlight: - return DEFAULT_HIGHLIGHT_ACITON + return DEFAULT_HIGHLIGHT_ACTION else: - return DEFAULT_NOTIF_ACITON + return DEFAULT_NOTIF_ACTION class EventPushActionsStore(SQLBaseStore): From 138e030cfe6e00956d79eed0702ff3f284692357 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 16 Feb 2017 15:03:36 +0000 Subject: [PATCH 23/57] Comment --- synapse/storage/event_push_actions.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index ea6722df63..808c9d22fc 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -36,7 +36,10 @@ def _serialize_action(actions, is_highlight): """Custom serializer for actions. This allows us to "compress" common actions. We use the fact that most users have the same actions for notifs (and for - highlights). We replaces these default actions with the emtpy string. + highlights). + We store these default actions as the empty string rather than the full JSON. 
+ Since the empty string isn't valid JSON there is no risk of this clashing with + any real JSON actions """ if is_highlight: if actions == DEFAULT_HIGHLIGHT_ACTION: From b4017539d49b16e2d91ca8e7d312c9da4dbf3ae7 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 17 Feb 2017 10:42:57 +0000 Subject: [PATCH 24/57] Make the pushers lang field column longer To accommodate things like zh-Hans-CN Fixes https://github.com/vector-im/riot-ios/issues/1031 --- synapse/storage/schema/delta/40/pushers.sql | 39 +++++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 synapse/storage/schema/delta/40/pushers.sql diff --git a/synapse/storage/schema/delta/40/pushers.sql b/synapse/storage/schema/delta/40/pushers.sql new file mode 100644 index 0000000000..7e34927efa --- /dev/null +++ b/synapse/storage/schema/delta/40/pushers.sql @@ -0,0 +1,39 @@ +/* Copyright 2017 Vector Creations Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +CREATE TABLE IF NOT EXISTS pushers2 ( + id BIGINT PRIMARY KEY, + user_name TEXT NOT NULL, + access_token BIGINT DEFAULT NULL, + profile_tag VARCHAR(32) NOT NULL, + kind VARCHAR(8) NOT NULL, + app_id VARCHAR(64) NOT NULL, + app_display_name VARCHAR(64) NOT NULL, + device_display_name VARCHAR(128) NOT NULL, + pushkey TEXT NOT NULL, + ts BIGINT NOT NULL, + lang VARCHAR(16), + data TEXT, + last_stream_ordering INTEGER, + last_success BIGINT, + failing_since BIGINT, + UNIQUE (app_id, pushkey, user_name) +); + +INSERT INTO pushers2 SELECT * FROM PUSHERS; + +DROP TABLE PUSHERS; + +ALTER TABLE pushers2 RENAME TO pushers; From 4aa29508af2ac9ac07f91a33d5d682b4b5815a9c Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 17 Feb 2017 10:51:49 +0000 Subject: [PATCH 25/57] Use TEXT rather than VARCHAR While we're changing anyway --- synapse/storage/schema/delta/40/pushers.sql | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/synapse/storage/schema/delta/40/pushers.sql b/synapse/storage/schema/delta/40/pushers.sql index 7e34927efa..054a223f14 100644 --- a/synapse/storage/schema/delta/40/pushers.sql +++ b/synapse/storage/schema/delta/40/pushers.sql @@ -17,14 +17,14 @@ CREATE TABLE IF NOT EXISTS pushers2 ( id BIGINT PRIMARY KEY, user_name TEXT NOT NULL, access_token BIGINT DEFAULT NULL, - profile_tag VARCHAR(32) NOT NULL, - kind VARCHAR(8) NOT NULL, - app_id VARCHAR(64) NOT NULL, - app_display_name VARCHAR(64) NOT NULL, - device_display_name VARCHAR(128) NOT NULL, + profile_tag TEXT NOT NULL, + kind TEXT NOT NULL, + app_id TEXT NOT NULL, + app_display_name TEXT NOT NULL, + device_display_name TEXT NOT NULL, pushkey TEXT NOT NULL, ts BIGINT NOT NULL, - lang VARCHAR(16), + lang TEXT, data TEXT, last_stream_ordering INTEGER, last_success BIGINT, From 5aae844e608554bdf5fd4487c729dd978b6eeffc Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 17 Feb 2017 12:48:53 +0000 Subject: [PATCH 26/57] Add an example log_config file --- contrib/example_log_config.yaml | 48 +++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) 
create mode 100644 contrib/example_log_config.yaml diff --git a/contrib/example_log_config.yaml b/contrib/example_log_config.yaml new file mode 100644 index 0000000000..c582af42d1 --- /dev/null +++ b/contrib/example_log_config.yaml @@ -0,0 +1,48 @@ +# Example log_config file for synapse. To enable, point `log_config` to it in +# `homeserver.yaml`, and restart synapse. +# +# This configuration will produce similar results to the defaults within +# synapse, but can be edited to give more flexibility. + +version: 1 + +formatters: + fmt: + format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s- %(message)s' + +filters: + context: + (): synapse.util.logcontext.LoggingContextFilter + request: "" + +handlers: + # example output to console + console: + class: logging.StreamHandler + filters: [context] + + # example output to file - to enable, edit 'root' config below. + file: + class: logging.handlers.RotatingFileHandler + formatter: fmt + filename: /var/log/synapse/homeserver.log + maxBytes: 100000000 + backupCount: 3 + filters: [context] + + +root: + level: INFO + handlers: [console] # to use file handler instead, switch to [file] + +loggers: + synapse: + level: INFO + + synapse.storage: + level: INFO + + # example of enabling debugging for a component: + # + # synapse.federation.transport.server: + # level: DEBUG \ No newline at end of file From 66eb0bd548afd87f08f7e6e994774fe29589babe Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 17 Feb 2017 12:55:36 +0000 Subject: [PATCH 27/57] Update example_log_config.yaml add trailing NL --- contrib/example_log_config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/example_log_config.yaml b/contrib/example_log_config.yaml index c582af42d1..7f7c8ba588 100644 --- a/contrib/example_log_config.yaml +++ b/contrib/example_log_config.yaml @@ -45,4 +45,4 @@ loggers: # example of enabling debugging for a component: # # synapse.federation.transport.server: - # level: DEBUG \ No newline at end of file + # level: DEBUG From 699be7d1be5238172677c98078c2a973ab28fb9d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Sat, 18 Feb 2017 14:41:31 +0000 Subject: [PATCH 28/57] Fix up notif rotation --- synapse/storage/event_push_actions.py | 36 ++++++++++++++++----------- 1 file changed, 22 insertions(+), 14 deletions(-) diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index 808c9d22fc..52d2bd3ffb 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -168,19 +168,13 @@ class EventPushActionsStore(SQLBaseStore): row = txn.fetchone() notify_count = row[0] if row else 0 - summary_notif_count = self._simple_select_one_onecol_txn( - txn, - table="event_push_summary", - keyvalues={ - "user_id": user_id, - "room_id": room_id, - }, - retcol="notif_count", - allow_none=True, - ) - - if summary_notif_count: - notify_count += summary_notif_count + txn.execute(""" + SELECT notif_count FROM event_push_summary + WHERE room_id = ? AND user_id = ? AND stream_ordering > ? 
+            """, (room_id, user_id, stream_ordering,))
+            rows = txn.fetchall()
+            if rows:
+                notify_count += rows[0][0]
 
         # Now get the number of highlights
         sql = (
@@ -645,12 +639,20 @@ class EventPushActionsStore(SQLBaseStore):
         # We want to make sure that we only ever do this one at a time
         self.database_engine.lock_table(txn, "event_push_summary")
 
+        old_rotate_stream_ordering = self._simple_select_one_onecol_txn(
+            txn,
+            table="event_push_summary_stream_ordering",
+            keyvalues={},
+            retcol="stream_ordering",
+        )
+
         # We don't want to try and rotate millions of rows at once, so we cap the
         # maximum stream ordering we'll rotate before.
         txn.execute("""
             SELECT stream_ordering FROM event_push_actions
+            WHERE stream_ordering > ?
             ORDER BY stream_ordering ASC LIMIT 1 OFFSET 50000
-        """)
+        """, (old_rotate_stream_ordering,))
         stream_row = txn.fetchone()
         if stream_row:
             offset_stream_ordering, = stream_row
@@ -662,6 +664,8 @@
             rotate_to_stream_ordering = self.stream_ordering_day_ago
             caught_up = True
 
+        logger.info("Rotating notifications up to: %s", rotate_to_stream_ordering)
+
         self._rotate_notifs_before_txn(txn, rotate_to_stream_ordering)
 
         # We have caught up iff we were limited by `stream_ordering_day_ago`
@@ -695,6 +699,8 @@
         txn.execute(sql, (old_rotate_stream_ordering, rotate_to_stream_ordering,))
         rows = txn.fetchall()
 
+        logger.info("Rotating notifications, handling %d rows", len(rows))
+
         # If the `old.user_id` above is NULL then we know there isn't already an
         # entry in the table, so we simply insert it. Otherwise we update the
         # existing table.
@@ -726,6 +732,8 @@
             (old_rotate_stream_ordering, rotate_to_stream_ordering,)
         )
 
+        logger.info("Rotating notifications, deleted %s push actions", txn.rowcount)
+
         txn.execute(
             "UPDATE event_push_summary_stream_ordering SET stream_ordering = ?",
             (rotate_to_stream_ordering,)

From 7efb38d1dd058dcb9e7051aeaf2019520101576d Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Sun, 19 Feb 2017 22:55:48 +0000
Subject: [PATCH 29/57] Update metrics-howto.rst

---
 docs/metrics-howto.rst | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/docs/metrics-howto.rst b/docs/metrics-howto.rst
index ca10799b00..56e69c1859 100644
--- a/docs/metrics-howto.rst
+++ b/docs/metrics-howto.rst
@@ -9,11 +9,13 @@ How to monitor Synapse metrics using Prometheus
    prometheus itself defaults to 9090, so starting just above that for
    locally monitored services seems reasonable. E.g. 9092:
 
-   Add to homeserver.yaml
+   Add to homeserver.yaml::
 
      metrics_port: 9092
 
-   Restart synapse
+   Also ensure that ``enable_metrics`` is set to ``True``.
+
+   Restart synapse.
 
 3: Add a prometheus target for synapse. It needs to set the ``metrics_path``
    to a non-default value::

From e556aefe0a0ac9e09429aed4436cd34aa6e586c0 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Sun, 19 Feb 2017 23:06:08 +0000
Subject: [PATCH 30/57] Update metrics-howto.rst

---
 docs/metrics-howto.rst | 30 ++++++++++++++++++------------
 1 file changed, 18 insertions(+), 12 deletions(-)

diff --git a/docs/metrics-howto.rst b/docs/metrics-howto.rst
index 56e69c1859..231c680786 100644
--- a/docs/metrics-howto.rst
+++ b/docs/metrics-howto.rst
@@ -1,24 +1,27 @@
 How to monitor Synapse metrics using Prometheus
 ===============================================
 
-1: Install prometheus:
-   Follow instructions at http://prometheus.io/docs/introduction/install/
+1.
Install prometheus: -2: Enable synapse metrics: - Simply setting a (local) port number will enable it. Pick a port. - prometheus itself defaults to 9090, so starting just above that for - locally monitored services seems reasonable. E.g. 9092: + Follow instructions at http://prometheus.io/docs/introduction/install/ - Add to homeserver.yaml:: +2. Enable synapse metrics: - metrics_port: 9092 + Simply setting a (local) port number will enable it. Pick a port. + prometheus itself defaults to 9090, so starting just above that for + locally monitored services seems reasonable. E.g. 9092: - Also ensure that ``enable_metrics`` is set to ``True``. + Add to homeserver.yaml:: + + metrics_port: 9092 + + Also ensure that ``enable_metrics`` is set to ``True``. - Restart synapse. + Restart synapse. -3: Add a prometheus target for synapse. It needs to set the ``metrics_path`` - to a non-default value:: +3. Add a prometheus target for synapse. + + It needs to set the ``metrics_path`` to a non-default value:: - job_name: "synapse" metrics_path: "/_synapse/metrics" @@ -26,6 +29,9 @@ How to monitor Synapse metrics using Prometheus - targets: "my.server.here:9092" + If your prometheus is older than 1.5.2, you will need to replace + ``static_configs`` in the above with ``target_groups``. + Standard Metric Names --------------------- From 6184f6fcbcc3c37e915596a6e2013f6983c9e260 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Sun, 19 Feb 2017 23:06:45 +0000 Subject: [PATCH 31/57] Update metrics-howto.rst --- docs/metrics-howto.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/metrics-howto.rst b/docs/metrics-howto.rst index 231c680786..7390ab85c9 100644 --- a/docs/metrics-howto.rst +++ b/docs/metrics-howto.rst @@ -31,6 +31,8 @@ How to monitor Synapse metrics using Prometheus If your prometheus is older than 1.5.2, you will need to replace ``static_configs`` in the above with ``target_groups``. + + Restart prometheus. 
 Standard Metric Names
 ---------------------

From 7f026792e12b7ca0d24ca4c8ed9ed239bf8a99ac Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 20 Feb 2017 14:54:50 +0000
Subject: [PATCH 32/57] Fix /context/ visibility rules

---
 synapse/handlers/room.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 7e7671c9a2..73bc73a45e 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -375,12 +375,15 @@ class RoomContextHandler(BaseHandler):
 
         now_token = yield self.hs.get_event_sources().get_current_token()
 
+        users = yield self.store.get_users_in_room(room_id)
+        is_peeking = user.to_string() not in users
+
         def filter_evts(events):
             return filter_events_for_client(
                 self.store,
                 user.to_string(),
                 events,
-                is_peeking=is_guest
+                is_peeking=is_peeking
             )
 
         event = yield self.store.get_event(event_id, get_prev_content=True,

From 17673404fbb1ca7b84efe7ca12955754bf366b09 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 20 Feb 2017 15:02:01 +0000
Subject: [PATCH 33/57] Remove unused param

---
 synapse/handlers/room.py       | 2 +-
 synapse/rest/client/v1/room.py | 1 -
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 73bc73a45e..99cb7db0db 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -356,7 +356,7 @@ class RoomCreationHandler(BaseHandler):
 
 class RoomContextHandler(BaseHandler):
     @defer.inlineCallbacks
-    def get_event_context(self, user, room_id, event_id, limit, is_guest):
+    def get_event_context(self, user, room_id, event_id, limit):
         """Retrieves events, pagination tokens and state around a given event
         in a room.
 
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 728e3df0e3..90242a6bac 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -505,7 +505,6 @@ class RoomEventContext(ClientV1RestServlet):
             room_id,
             event_id,
             limit,
-            requester.is_guest,
         )
 
         if not results:

From efff39c03013f4cd5303466440d84484f338cb57 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 20 Feb 2017 14:54:50 +0000
Subject: [PATCH 34/57] Fix /context/ visibility rules

---
 synapse/handlers/room.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 7e7671c9a2..73bc73a45e 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -375,12 +375,15 @@ class RoomContextHandler(BaseHandler):
 
         now_token = yield self.hs.get_event_sources().get_current_token()
 
+        users = yield self.store.get_users_in_room(room_id)
+        is_peeking = user.to_string() not in users
+
         def filter_evts(events):
             return filter_events_for_client(
                 self.store,
                 user.to_string(),
                 events,
-                is_peeking=is_guest
+                is_peeking=is_peeking
             )
 
         event = yield self.store.get_event(event_id, get_prev_content=True,

From 6226a27bf8ff4a427888110dd9d05ea969885003 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 20 Feb 2017 15:02:01 +0000
Subject: [PATCH 35/57] Remove unused param

---
 synapse/handlers/room.py       | 2 +-
 synapse/rest/client/v1/room.py | 1 -
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 73bc73a45e..99cb7db0db 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -356,7 +356,7 @@ class RoomCreationHandler(BaseHandler):
 
 class RoomContextHandler(BaseHandler):
     @defer.inlineCallbacks
-    def get_event_context(self, user, room_id, event_id, limit, is_guest):
+
def get_event_context(self, user, room_id, event_id, limit): """Retrieves events, pagination tokens and state around a given event in a room. diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 2ebf5e59a0..6554f57df1 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -505,7 +505,6 @@ class RoomEventContext(ClientV1RestServlet): room_id, event_id, limit, - requester.is_guest, ) if not results: From 0c4cf9372b3200325109dc2e4975197aa9b7b7ca Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 20 Feb 2017 16:43:29 +0000 Subject: [PATCH 36/57] Fix a race in transaction queue It was theoretically possible for a PDU to get queued and not sent for ages. On closer inspection I think there were bigger problems elsewhere, but we might as well fix this since it's easy. --- synapse/federation/transaction_queue.py | 30 +++++++++++++++++-------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py index bb3d9258a6..90235ff098 100644 --- a/synapse/federation/transaction_queue.py +++ b/synapse/federation/transaction_queue.py @@ -303,18 +303,10 @@ class TransactionQueue(object): try: self.pending_transactions[destination] = 1 + # XXX: what's this for? yield run_on_reactor() while True: - pending_pdus = self.pending_pdus_by_dest.pop(destination, []) - pending_edus = self.pending_edus_by_dest.pop(destination, []) - pending_presence = self.pending_presence_by_dest.pop(destination, {}) - pending_failures = self.pending_failures_by_dest.pop(destination, []) - - pending_edus.extend( - self.pending_edus_keyed_by_dest.pop(destination, {}).values() - ) - limiter = yield get_retry_limiter( destination, self.clock, @@ -326,6 +318,24 @@ class TransactionQueue(object): yield self._get_new_device_messages(destination) ) + # BEGIN CRITICAL SECTION + # + # In order to avoid a race condition, we need to make sure that + # the following code (from popping the queues up to the point + # where we decide if we actually have any pending messages) is + # atomic - otherwise new PDUs or EDUs might arrive in the + # meantime, but not get sent because we hold the + # pending_transactions flag. + + pending_pdus = self.pending_pdus_by_dest.pop(destination, []) + pending_edus = self.pending_edus_by_dest.pop(destination, []) + pending_presence = self.pending_presence_by_dest.pop(destination, {}) + pending_failures = self.pending_failures_by_dest.pop(destination, []) + + pending_edus.extend( + self.pending_edus_keyed_by_dest.pop(destination, {}).values() + ) + pending_edus.extend(device_message_edus) if pending_presence: pending_edus.append( @@ -355,6 +365,8 @@ class TransactionQueue(object): ) return + # END CRITICAL SECTION + success = yield self._send_new_transaction( destination, pending_pdus, pending_edus, pending_failures, limiter=limiter, From 30ecfef5a394a6ce1537c171b230d51d0e79197a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 21 Feb 2017 13:35:30 +0000 Subject: [PATCH 37/57] Bump version and changelog --- CHANGES.rst | 7 +++++++ synapse/__init__.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 22aa7cb9b4..c8856d9575 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,10 @@ +Changes in synapse v0.19.2 (2017-02-20) +======================================= + +* Fix bug with event visibility check in /context/ API. Thanks to Tokodomo for + pointing it out! 
(PR #1929) + + Changes in synapse v0.19.1 (2017-02-09) ======================================= diff --git a/synapse/__init__.py b/synapse/__init__.py index da8ef90a77..ff251ce597 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. """ -__version__ = "0.19.1" +__version__ = "0.19.2" From b7442c3e2b333616093af76ab7848fa82b6490a9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 21 Feb 2017 13:59:25 +0000 Subject: [PATCH 38/57] Store looping call --- synapse/storage/event_push_actions.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index 52d2bd3ffb..cde141e20d 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -84,7 +84,9 @@ class EventPushActionsStore(SQLBaseStore): ) self._doing_notif_rotation = False - self._clock.looping_call(self._rotate_notifs, 30 * 60 * 1000) + self._rotate_notif_loop = self._clock.looping_call( + self._rotate_notifs, 30 * 60 * 1000 + ) def _set_push_actions_for_event_and_users_txn(self, txn, event, tuples): """ From 7455ba436a2903c8e6c9086c9ebe89f4c88af946 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 22 Feb 2017 12:08:14 +0000 Subject: [PATCH 39/57] Ensure we pass positive ints to delay function --- synapse/push/emailpusher.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index 2eb325c7c7..c7afd11111 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -218,7 +218,8 @@ class EmailPusher(object): ) def seconds_until(self, ts_msec): - return (ts_msec - self.clock.time_msec()) / 1000 + secs = (ts_msec - self.clock.time_msec()) / 1000 + return max(secs, 0) def get_room_throttle_ms(self, room_id): if room_id in self.throttle_params: From b2d20e94fa0a2efa84a084ddededb52d89af64c6 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 22 Feb 2017 14:23:54 +0000 Subject: [PATCH 40/57] Remove lock from rotate notifs --- synapse/storage/event_push_actions.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index cde141e20d..14543b4269 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -638,9 +638,6 @@ class EventPushActionsStore(SQLBaseStore): the archiving process has caught up or not. 
""" - # We want to make sure that we only ever do this one at a time - self.database_engine.lock_table(txn, "event_push_summary") - old_rotate_stream_ordering = self._simple_select_one_onecol_txn( txn, table="event_push_summary_stream_ordering", From 1a4f8022e6312856b0888639d224e84d505202eb Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 23 Feb 2017 11:15:31 +0000 Subject: [PATCH 41/57] Strip newlines from SQL queries --- synapse/storage/_base.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index b0dc391190..557701d0c4 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -80,7 +80,13 @@ class LoggingTransaction(object): def executemany(self, sql, *args): self._do_execute(self.txn.executemany, sql, *args) + def _make_sql_one_line(self, sql): + "Strip newlines out of SQL so that the loggers in the DB are on one line" + return " ".join(l.strip() for l in sql.splitlines() if l.strip()) + def _do_execute(self, func, sql, *args): + sql = self._make_sql_one_line(sql) + # TODO(paul): Maybe use 'info' and 'debug' for values? sql_logger.debug("[SQL] {%s} %s", self.name, sql) From aea546148879f7e376c626346eb789308d089249 Mon Sep 17 00:00:00 2001 From: Jurek Date: Fri, 24 Feb 2017 22:42:38 +0100 Subject: [PATCH 42/57] Fix dynamic thumbnails aspect --- synapse/rest/media/v1/media_repository.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 3cbeca503c..481ffee200 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -240,6 +240,9 @@ class MediaRepository(object): if t_method == "crop": t_len = thumbnailer.crop(t_path, t_width, t_height, t_type) elif t_method == "scale": + t_width, t_height = thumbnailer.aspect(t_width, t_height) + t_width = min(m_width, t_width) + t_height = min(m_height, t_height) t_len = thumbnailer.scale(t_path, t_width, t_height, t_type) else: t_len = None From c80439a320f00383d002aca966ff77979d614f4b Mon Sep 17 00:00:00 2001 From: Sean Enck Date: Mon, 27 Feb 2017 10:06:15 -0500 Subject: [PATCH 43/57] the aur package is no longer there, community package in arch does exist --- README.rst | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/README.rst b/README.rst index 77e0b470a3..a73c71c77e 100644 --- a/README.rst +++ b/README.rst @@ -332,9 +332,8 @@ https://obs.infoserver.lv/project/monitor/matrix-synapse ArchLinux --------- -The quickest way to get up and running with ArchLinux is probably with Ivan -Shapovalov's AUR package from -https://aur.archlinux.org/packages/matrix-synapse/, which should pull in all +The quickest way to get up and running with ArchLinux is probably with the community package +https://www.archlinux.org/packages/community/any/matrix-synapse/, which should pull in all the necessary dependencies. 
Alternatively, to install using pip a few changes may be needed as ArchLinux From f58dbb02a6c7a65abb4908f99c68369127e950a9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 27 Feb 2017 16:22:12 +0000 Subject: [PATCH 44/57] Cache get_user_devices_from_cache --- synapse/storage/devices.py | 118 +++++++++++++++++++++++-------------- 1 file changed, 73 insertions(+), 45 deletions(-) diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py index 8e17800364..d22db0a0b9 100644 --- a/synapse/storage/devices.py +++ b/synapse/storage/devices.py @@ -19,6 +19,8 @@ from twisted.internet import defer from synapse.api.errors import StoreError from ._base import SQLBaseStore +from synapse.util.caches.descriptors import cached, cachedList, cachedInlineCallbacks + logger = logging.getLogger(__name__) @@ -144,6 +146,7 @@ class DeviceStore(SQLBaseStore): defer.returnValue({d["device_id"]: d for d in devices}) + @cached(max_entries=10000) def get_device_list_last_stream_id_for_remote(self, user_id): """Get the last stream_id we got for a user. May be None if we haven't got any information for them. @@ -156,16 +159,36 @@ class DeviceStore(SQLBaseStore): allow_none=True, ) + @cachedList(cached_method_name="get_device_list_last_stream_id_for_remote", + list_name="user_ids", inlineCallbacks=True) + def get_device_list_last_stream_id_for_remotes(self, user_ids): + rows = yield self._simple_select_many_batch( + table="device_lists_remote_extremeties", + column="user_id", + iterable=user_ids, + retcols=("user_id", "stream_id",), + desc="get_user_devices_from_cache", + ) + + results = {user_id: None for user_id in user_ids} + results.update({ + row["user_id"]: row["stream_id"] for row in rows + }) + + defer.returnValue(results) + + @defer.inlineCallbacks def mark_remote_user_device_list_as_unsubscribed(self, user_id): """Mark that we no longer track device lists for remote user. """ - return self._simple_delete( + yield self._simple_delete( table="device_lists_remote_extremeties", keyvalues={ "user_id": user_id, }, desc="mark_remote_user_device_list_as_unsubscribed", ) + self.get_device_list_last_stream_id_for_remote.invalidate((user_id,)) def update_remote_device_list_cache_entry(self, user_id, device_id, content, stream_id): @@ -191,6 +214,12 @@ class DeviceStore(SQLBaseStore): } ) + txn.call_after(self._get_cached_user_device.invalidate, (user_id, device_id,)) + txn.call_after(self._get_cached_devices_for_user.invalidate, (user_id,)) + txn.call_after( + self.get_device_list_last_stream_id_for_remote.invalidate, (user_id,) + ) + self._simple_upsert_txn( txn, table="device_lists_remote_extremeties", @@ -234,6 +263,12 @@ class DeviceStore(SQLBaseStore): ] ) + txn.call_after(self._get_cached_devices_for_user.invalidate, (user_id,)) + txn.call_after(self._get_cached_user_device.invalidate_many, (user_id,)) + txn.call_after( + self.get_device_list_last_stream_id_for_remote.invalidate, (user_id,) + ) + self._simple_upsert_txn( txn, table="device_lists_remote_extremeties", @@ -320,6 +355,7 @@ class DeviceStore(SQLBaseStore): return (now_stream_id, results) + @defer.inlineCallbacks def get_user_devices_from_cache(self, query_list): """Get the devices (and keys if any) for remote users from the cache. 
@@ -332,27 +368,11 @@ class DeviceStore(SQLBaseStore):
             a set of user_ids and results_map is a mapping of
             user_id -> device_id -> device_info
         """
-        return self.runInteraction(
-            "get_user_devices_from_cache", self._get_user_devices_from_cache_txn,
-            query_list,
+        user_ids = set(user_id for user_id, _ in query_list)
+        user_map = yield self.get_device_list_last_stream_id_for_remotes(list(user_ids))
+        user_ids_in_cache = set(
+            user_id for user_id, stream_id in user_map.items() if stream_id
         )
-
-    def _get_user_devices_from_cache_txn(self, txn, query_list):
-        user_ids = {user_id for user_id, _ in query_list}
-
-        user_ids_in_cache = set()
-        for user_id in user_ids:
-            stream_ids = self._simple_select_onecol_txn(
-                txn,
-                table="device_lists_remote_extremeties",
-                keyvalues={
-                    "user_id": user_id,
-                },
-                retcol="stream_id",
-            )
-            if stream_ids:
-                user_ids_in_cache.add(user_id)
-
         user_ids_not_in_cache = user_ids - user_ids_in_cache
 
         results = {}
@@ -361,32 +381,40 @@
                 continue
 
             if device_id:
-                content = self._simple_select_one_onecol_txn(
-                    txn,
-                    table="device_lists_remote_cache",
-                    keyvalues={
-                        "user_id": user_id,
-                        "device_id": device_id,
-                    },
-                    retcol="content",
-                )
-                results.setdefault(user_id, {})[device_id] = json.loads(content)
+                device = yield self._get_cached_user_device(user_id, device_id)
+                results.setdefault(user_id, {})[device_id] = device
             else:
-                devices = self._simple_select_list_txn(
-                    txn,
-                    table="device_lists_remote_cache",
-                    keyvalues={
-                        "user_id": user_id,
-                    },
-                    retcols=("device_id", "content"),
-                )
-                results[user_id] = {
-                    device["device_id"]: json.loads(device["content"])
-                    for device in devices
-                }
-                user_ids_in_cache.discard(user_id)
+                results[user_id] = yield self._get_cached_devices_for_user(user_id)
 
-        return user_ids_not_in_cache, results
+        defer.returnValue((user_ids_not_in_cache, results))
+
+    @cachedInlineCallbacks(num_args=2, tree=True)
+    def _get_cached_user_device(self, user_id, device_id):
+        content = yield self._simple_select_one_onecol(
+            table="device_lists_remote_cache",
+            keyvalues={
+                "user_id": user_id,
+                "device_id": device_id,
+            },
+            retcol="content",
+            desc="_get_cached_user_device",
+        )
+        defer.returnValue(json.loads(content))
+
+    @cachedInlineCallbacks()
+    def _get_cached_devices_for_user(self, user_id):
+        devices = yield self._simple_select_list(
+            table="device_lists_remote_cache",
+            keyvalues={
+                "user_id": user_id,
+            },
+            retcols=("device_id", "content"),
+            desc="_get_cached_devices_for_user",
+        )
+        defer.returnValue({
+            device["device_id"]: json.loads(device["content"])
+            for device in devices
+        })
 
     def get_devices_with_keys_by_user(self, user_id):
         """Get all devices (with any device keys) for a user

From 49f4bc470993136ee4f5af7c34bee5ddc22768bd Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 27 Feb 2017 18:33:41 +0000
Subject: [PATCH 45/57] Don't fetch current state in common case

Currently we fetch the list of current state events whenever we send
something in a room. This is overkill for the common case of persisting
a simple chain of non-state events, so let's handle that case specially.
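
Roughly, the fast path applies only when the room's forward extremities stay
a single unforked chain of non-state events. As a condensed sketch (a
hypothetical standalone helper, not the code in the diff below; it also
folds in the two small fixes made by the next two patches):

    def _is_linear_non_state_chain(latest_event_ids, new_latest_event_ids,
                                   events_and_contexts):
        # A linear chain of non-state events cannot change the room's
        # current state, so the expensive state delta calculation can be
        # skipped entirely when this returns True.
        return (
            len(latest_event_ids) == 1
            and len(new_latest_event_ids) == 1
            and all(
                len(event.prev_events) == 1 and not event.is_state()
                for event, _ctx in events_and_contexts
            )
        )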
--- synapse/storage/events.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index c88f689d3a..a4ce71cb27 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -311,6 +311,23 @@ class EventsStore(SQLBaseStore): new_forward_extremeties[room_id] = new_latest_event_ids + len_1 = ( + len(latest_event_ids) == 1 + and len(new_latest_event_ids) == 1 + ) + if len_1: + all_single_prev_not_state = any( + len(event.prev_events) == 1 + and not event.is_state() + for event, ctx in ev_ctx_rm + if not event.internal_metadata.is_outlier() + and not ctx.rejected + ) + # Don't bother calculating state if they're just + # a long chain of single ancestor non-state events. + if all_single_prev_not_state: + continue + state = yield self._calculate_state_delta( room_id, ev_ctx_rm, new_latest_event_ids ) From c0d6045776909a83181a4a925198c58e625ea4a2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 27 Feb 2017 18:45:24 +0000 Subject: [PATCH 46/57] It should be all --- synapse/storage/events.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index a4ce71cb27..08807deba9 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -316,7 +316,7 @@ class EventsStore(SQLBaseStore): and len(new_latest_event_ids) == 1 ) if len_1: - all_single_prev_not_state = any( + all_single_prev_not_state = all( len(event.prev_events) == 1 and not event.is_state() for event, ctx in ev_ctx_rm From a41dce8f8ae8bfb957a5830783e00437b4636c10 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 27 Feb 2017 18:54:43 +0000 Subject: [PATCH 47/57] Remove needless check --- synapse/storage/events.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 08807deba9..db01eb6d14 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -320,8 +320,6 @@ class EventsStore(SQLBaseStore): len(event.prev_events) == 1 and not event.is_state() for event, ctx in ev_ctx_rm - if not event.internal_metadata.is_outlier() - and not ctx.rejected ) # Don't bother calculating state if they're just # a long chain of single ancestor non-state events. 
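
As a quick standalone illustration of why the quantifier in the two fixes
above matters (toy stand-ins for the real event objects; only the two
attributes the check inspects are modelled here):

    class FakeEvent(object):
        def __init__(self, prev_events, is_state_event):
            self.prev_events = prev_events
            self._is_state_event = is_state_event

        def is_state(self):
            return self._is_state_event

    linear = FakeEvent(prev_events=[("$a:example.com", {})], is_state_event=False)
    state_ev = FakeEvent(prev_events=[("$a:example.com", {})], is_state_event=True)
    chunk = [(linear, None), (state_ev, None)]

    def qualifies(event):
        return len(event.prev_events) == 1 and not event.is_state()

    # any() would wrongly take the fast path as soon as one event
    # qualifies; all() only takes it when the whole chunk is a plain
    # linear chain of non-state events.
    print(any(qualifies(event) for event, _ in chunk))  # True
    print(all(qualifies(event) for event, _ in chunk))  # False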
From 64a2cef9bb2b0cf62b2a72f63c25c4781c8354e2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 27 Feb 2017 19:15:36 +0000 Subject: [PATCH 48/57] Pop rather than del from dict --- synapse/handlers/federation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 996bfd0e23..06d4530177 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1096,7 +1096,7 @@ class FederationHandler(BaseHandler): if prev_id != event.event_id: results[(event.type, event.state_key)] = prev_id else: - del results[(event.type, event.state_key)] + results.pop((event.type, event.state_key)) defer.returnValue(results.values()) else: From 9037787f0b655067eb28ae689745e60e7472e7b9 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Tue, 28 Feb 2017 00:29:54 +0000 Subject: [PATCH 49/57] merge in right archlinux package, thanks to @saram-kon from https://github.com/matrix-org/synapse/pull/1956 --- contrib/systemd/synapse.service | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/systemd/synapse.service b/contrib/systemd/synapse.service index 967a4debfd..92d94b9d58 100644 --- a/contrib/systemd/synapse.service +++ b/contrib/systemd/synapse.service @@ -1,5 +1,5 @@ # This assumes that Synapse has been installed as a system package -# (e.g. https://aur.archlinux.org/packages/matrix-synapse/ for ArchLinux) +# (e.g. https://www.archlinux.org/packages/community/any/matrix-synapse/ for ArchLinux) # rather than in a user home directory or similar under virtualenv. [Unit] From 848cf95ea088ea6bff209d8fdb7bdb809d7aa8fa Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 28 Feb 2017 10:01:19 +0000 Subject: [PATCH 50/57] Pop with default value to stop throwing --- synapse/handlers/federation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 06d4530177..ed0fa51e7f 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1096,7 +1096,7 @@ class FederationHandler(BaseHandler): if prev_id != event.event_id: results[(event.type, event.state_key)] = prev_id else: - results.pop((event.type, event.state_key)) + results.pop((event.type, event.state_key), None) defer.returnValue(results.values()) else: From 8a12b6f1eb16465bb2be816d3f92f07b844564d3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 28 Feb 2017 10:15:50 +0000 Subject: [PATCH 51/57] Fix up txn name --- synapse/storage/devices.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py index d22db0a0b9..7b5903bf8e 100644 --- a/synapse/storage/devices.py +++ b/synapse/storage/devices.py @@ -517,7 +517,7 @@ class DeviceStore(SQLBaseStore): WHERE stream_id > ? 
""" return self._execute( - "get_users_and_hosts_device_list", None, + "get_all_device_list_changes_for_remotes", None, sql, from_key, ) From e4919b93297031de041b55473984a2913aeb0c5d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 28 Feb 2017 11:17:27 +0000 Subject: [PATCH 52/57] Add stream_id index to device_lists_outbound_pokes As this is used for replication streaming --- synapse/storage/prepare_database.py | 2 +- .../schema/delta/41/device_outbound_index.sql | 16 ++++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) create mode 100644 synapse/storage/schema/delta/41/device_outbound_index.sql diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index b357f22be7..ed84db6b4b 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -25,7 +25,7 @@ logger = logging.getLogger(__name__) # Remember to update this number every time a change is made to database # schema files, so the users will be informed on server restarts. -SCHEMA_VERSION = 40 +SCHEMA_VERSION = 41 dir_path = os.path.abspath(os.path.dirname(__file__)) diff --git a/synapse/storage/schema/delta/41/device_outbound_index.sql b/synapse/storage/schema/delta/41/device_outbound_index.sql new file mode 100644 index 0000000000..62f0b9892b --- /dev/null +++ b/synapse/storage/schema/delta/41/device_outbound_index.sql @@ -0,0 +1,16 @@ +/* Copyright 2017 Vector Creations Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +CREATE INDEX device_lists_outbound_pokes_stream ON device_lists_outbound_pokes(stream_id); From b84907bdbbc70ede585f65ee092fe0e2013f0d5d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 28 Feb 2017 14:37:29 +0000 Subject: [PATCH 53/57] Intern table column names once --- synapse/storage/_base.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 557701d0c4..4410cd9e62 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -18,7 +18,6 @@ from synapse.api.errors import StoreError from synapse.util.logcontext import LoggingContext, PreserveLoggingContext from synapse.util.caches.dictionary_cache import DictionaryCache from synapse.util.caches.descriptors import Cache -from synapse.util.caches import intern_dict from synapse.storage.engines import PostgresEngine import synapse.metrics @@ -356,9 +355,9 @@ class SQLBaseStore(object): Returns: A list of dicts where the key is the column header. 
""" - col_headers = list(column[0] for column in cursor.description) + col_headers = list(intern(column[0]) for column in cursor.description) results = list( - intern_dict(dict(zip(col_headers, row))) for row in cursor.fetchall() + dict(zip(col_headers, row)) for row in cursor.fetchall() ) return results From e933a2712d3987dac5a209a3a49a2a0664f62213 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 28 Feb 2017 16:22:41 +0000 Subject: [PATCH 54/57] Don't log unknown cache warnings in workers --- synapse/replication/slave/storage/_base.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/synapse/replication/slave/storage/_base.py b/synapse/replication/slave/storage/_base.py index 18076e0f3b..ab133db872 100644 --- a/synapse/replication/slave/storage/_base.py +++ b/synapse/replication/slave/storage/_base.py @@ -54,7 +54,9 @@ class BaseSlavedStore(SQLBaseStore): try: getattr(self, cache_func).invalidate(tuple(keys)) except AttributeError: - logger.info("Got unexpected cache_func: %r", cache_func) + # We probably haven't pulled in the cache in this worker, + # which is fine. + pass self._cache_id_gen.advance(int(stream["position"])) return defer.succeed(None) From f4e7545d8863e24a2327636181a49589d1e78ed8 Mon Sep 17 00:00:00 2001 From: "Paul \"LeoNerd\" Evans" Date: Tue, 28 Feb 2017 18:11:52 +0000 Subject: [PATCH 55/57] No longer need to request all the sub-components to be split when running sytest+dendron --- jenkins-dendron-postgres.sh | 6 ------ 1 file changed, 6 deletions(-) diff --git a/jenkins-dendron-postgres.sh b/jenkins-dendron-postgres.sh index 55ff31fd18..37ae746f4b 100755 --- a/jenkins-dendron-postgres.sh +++ b/jenkins-dendron-postgres.sh @@ -17,9 +17,3 @@ export SYNAPSE_CACHE_FACTOR=1 ./sytest/jenkins/install_and_run.sh \ --synapse-directory $WORKSPACE \ --dendron $WORKSPACE/dendron/bin/dendron \ - --pusher \ - --synchrotron \ - --federation-reader \ - --client-reader \ - --appservice \ - --federation-sender \ From 6b1ffa5f3de93618ae748391215e7139099b265f Mon Sep 17 00:00:00 2001 From: "Paul \"LeoNerd\" Evans" Date: Tue, 28 Feb 2017 18:14:13 +0000 Subject: [PATCH 56/57] Added also a control script to run via the crazy dendron+haproxy hybrid we're temporarily using --- jenkins-dendron-haproxy-postgres.sh | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100755 jenkins-dendron-haproxy-postgres.sh diff --git a/jenkins-dendron-haproxy-postgres.sh b/jenkins-dendron-haproxy-postgres.sh new file mode 100755 index 0000000000..d64b2d2c9d --- /dev/null +++ b/jenkins-dendron-haproxy-postgres.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +set -eux + +: ${WORKSPACE:="$(pwd)"} + +export WORKSPACE +export PYTHONDONTWRITEBYTECODE=yep +export SYNAPSE_CACHE_FACTOR=1 + +export HAPROXY_BIN=/home/haproxy/haproxy-1.6.11/haproxy + +./jenkins/prepare_synapse.sh +./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git +./jenkins/clone.sh dendron https://github.com/matrix-org/dendron.git +./dendron/jenkins/build_dendron.sh +./sytest/jenkins/prep_sytest_for_postgres.sh + +./sytest/jenkins/install_and_run.sh \ + --synapse-directory $WORKSPACE \ + --dendron $WORKSPACE/dendron/bin/dendron \ + --haproxy \